in_source_id: string
before_files: list
after_files: list
pr_diff: string
issue: string
in_source_id: searx__searx-2132
[ { "content": "#!/usr/bin/env python\n\n'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2013- by Adam Tauber, <[email protected]>\n'''\n\nif __name__ == '__main__':\n from sys import path\n from os.path import realpath, dirname\n path.append(realpath(dirname(realpath(__file__)) + '/../'))\n\nimport hashlib\nimport hmac\nimport json\nimport os\nimport sys\n\nimport requests\n\nfrom searx import logger\nlogger = logger.getChild('webapp')\n\ntry:\n from pygments import highlight\n from pygments.lexers import get_lexer_by_name\n from pygments.formatters import HtmlFormatter\nexcept:\n logger.critical(\"cannot import dependency: pygments\")\n from sys import exit\n exit(1)\ntry:\n from cgi import escape\nexcept:\n from html import escape\nfrom six import next\nfrom datetime import datetime, timedelta\nfrom time import time\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom flask import (\n Flask, request, render_template, url_for, Response, make_response,\n redirect, send_from_directory\n)\nfrom babel.support import Translations\nimport flask_babel\nfrom flask_babel import Babel, gettext, format_date, format_decimal\nfrom flask.ctx import has_request_context\nfrom flask.json import jsonify\nfrom searx import brand, static_path\nfrom searx import settings, searx_dir, searx_debug\nfrom searx.exceptions import SearxParameterException\nfrom searx.engines import (\n categories, engines, engine_shortcuts, get_engines_stats, initialize_engines\n)\nfrom searx.utils import (\n UnicodeWriter, highlight_content, html_to_text, get_resources_directory,\n get_static_files, get_result_templates, get_themes, gen_useragent,\n dict_subset, prettify_url, match_language\n)\nfrom searx.version import VERSION_STRING\nfrom searx.languages import language_codes as languages\nfrom searx.search import SearchWithPlugins, get_search_query_from_webapp\nfrom searx.query import RawTextQuery\nfrom searx.autocomplete import searx_bang, backends as autocomplete_backends\nfrom searx.plugins import plugins\nfrom searx.plugins.oa_doi_rewrite import get_doi_resolver\nfrom searx.preferences import Preferences, ValidationException, LANGUAGE_CODES\nfrom searx.answerers import answerers\nfrom searx.url_utils import urlencode, urlparse, urljoin\nfrom searx.utils import new_hmac\n\n# check if the pyopenssl package is installed.\n# It is needed for SSL connection without trouble, see #298\ntry:\n import OpenSSL.SSL # NOQA\nexcept ImportError:\n logger.critical(\"The pyopenssl package has to be installed.\\n\"\n \"Some HTTPS connections will fail\")\n\ntry:\n from cStringIO import StringIO\nexcept:\n from io import StringIO\n\n\nif sys.version_info[0] == 3:\n unicode = str\n PY3 = True\nelse:\n logger.warning('\\033[1;31m Python2 is no longer supported\\033[0m')\n exit(1)\n\n# serve pages with HTTP/1.1\nfrom werkzeug.serving import WSGIRequestHandler\nWSGIRequestHandler.protocol_version = \"HTTP/{}\".format(settings['server'].get('http_protocol_version', 
'1.0'))\n\n# about static\nstatic_path = get_resources_directory(searx_dir, 'static', settings['ui']['static_path'])\nlogger.debug('static directory is %s', static_path)\nstatic_files = get_static_files(static_path)\n\n# about templates\ndefault_theme = settings['ui']['default_theme']\ntemplates_path = get_resources_directory(searx_dir, 'templates', settings['ui']['templates_path'])\nlogger.debug('templates directory is %s', templates_path)\nthemes = get_themes(templates_path)\nresult_templates = get_result_templates(templates_path)\nglobal_favicons = []\nfor indice, theme in enumerate(themes):\n global_favicons.append([])\n theme_img_path = os.path.join(static_path, 'themes', theme, 'img', 'icons')\n for (dirpath, dirnames, filenames) in os.walk(theme_img_path):\n global_favicons[indice].extend(filenames)\n\n# Flask app\napp = Flask(\n __name__,\n static_folder=static_path,\n template_folder=templates_path\n)\n\napp.jinja_env.trim_blocks = True\napp.jinja_env.lstrip_blocks = True\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\napp.secret_key = settings['server']['secret_key']\n\nif not searx_debug \\\n or os.environ.get(\"WERKZEUG_RUN_MAIN\") == \"true\" \\\n or os.environ.get('UWSGI_ORIGINAL_PROC_NAME') is not None:\n initialize_engines(settings['engines'])\n\nbabel = Babel(app)\n\nrtl_locales = ['ar', 'arc', 'bcc', 'bqi', 'ckb', 'dv', 'fa', 'fa_IR', 'glk', 'he',\n 'ku', 'mzn', 'pnb', 'ps', 'sd', 'ug', 'ur', 'yi']\n\n# used when translating category names\n_category_names = (gettext('files'),\n gettext('general'),\n gettext('music'),\n gettext('social media'),\n gettext('images'),\n gettext('videos'),\n gettext('it'),\n gettext('news'),\n gettext('map'),\n gettext('science'))\n\noutgoing_proxies = settings['outgoing'].get('proxies') or None\n\n_flask_babel_get_translations = flask_babel.get_translations\n\n\n# monkey patch for flask_babel.get_translations\ndef _get_translations():\n if has_request_context() and request.form.get('use-translation') == 'oc':\n babel_ext = flask_babel.current_app.extensions['babel']\n return Translations.load(next(babel_ext.translation_directories), 'oc')\n\n return _flask_babel_get_translations()\n\n\nflask_babel.get_translations = _get_translations\n\n\ndef _get_browser_language(request, lang_list):\n for lang in request.headers.get(\"Accept-Language\", \"en\").split(\",\"):\n if ';' in lang:\n lang = lang.split(';')[0]\n locale = match_language(lang, lang_list, fallback=None)\n if locale is not None:\n return locale\n return settings['search']['default_lang'] or 'en'\n\n\[email protected]\ndef get_locale():\n locale = _get_browser_language(request, settings['locales'].keys())\n\n logger.debug(\"default locale from browser info is `%s`\", locale)\n\n if request.preferences.get_value('locale') != '':\n locale = request.preferences.get_value('locale')\n\n if 'locale' in request.form\\\n and request.form['locale'] in settings['locales']:\n locale = request.form['locale']\n\n if locale == 'zh_TW':\n locale = 'zh_Hant_TW'\n\n if locale == 'oc':\n request.form['use-translation'] = 'oc'\n locale = 'fr_FR'\n\n logger.debug(\"selected locale is `%s`\", locale)\n\n return locale\n\n\n# code-highlighter\[email protected]_filter('code_highlighter')\ndef code_highlighter(codelines, language=None):\n if not language:\n language = 'text'\n\n try:\n # find lexer by programing language\n lexer = get_lexer_by_name(language, stripall=True)\n except:\n # if lexer is not found, using default one\n logger.debug('highlighter cannot find lexer for 
{0}'.format(language))\n lexer = get_lexer_by_name('text', stripall=True)\n\n html_code = ''\n tmp_code = ''\n last_line = None\n\n # parse lines\n for line, code in codelines:\n if not last_line:\n line_code_start = line\n\n # new codeblock is detected\n if last_line is not None and\\\n last_line + 1 != line:\n\n # highlight last codepart\n formatter = HtmlFormatter(linenos='inline',\n linenostart=line_code_start)\n html_code = html_code + highlight(tmp_code, lexer, formatter)\n\n # reset conditions for next codepart\n tmp_code = ''\n line_code_start = line\n\n # add codepart\n tmp_code += code + '\\n'\n\n # update line\n last_line = line\n\n # highlight last codepart\n formatter = HtmlFormatter(linenos='inline', linenostart=line_code_start)\n html_code = html_code + highlight(tmp_code, lexer, formatter)\n\n return html_code\n\n\n# Extract domain from url\[email protected]_filter('extract_domain')\ndef extract_domain(url):\n return urlparse(url)[1]\n\n\ndef get_base_url():\n if settings['server']['base_url']:\n hostname = settings['server']['base_url']\n else:\n scheme = 'http'\n if request.is_secure:\n scheme = 'https'\n hostname = url_for('index', _external=True, _scheme=scheme)\n return hostname\n\n\ndef get_current_theme_name(override=None):\n \"\"\"Returns theme name.\n\n Checks in this order:\n 1. override\n 2. cookies\n 3. settings\"\"\"\n\n if override and (override in themes or override == '__common__'):\n return override\n theme_name = request.args.get('theme', request.preferences.get_value('theme'))\n if theme_name not in themes:\n theme_name = default_theme\n return theme_name\n\n\ndef get_result_template(theme, template_name):\n themed_path = theme + '/result_templates/' + template_name\n if themed_path in result_templates:\n return themed_path\n return 'result_templates/' + template_name\n\n\ndef url_for_theme(endpoint, override_theme=None, **values):\n if endpoint == 'static' and values.get('filename'):\n theme_name = get_current_theme_name(override=override_theme)\n filename_with_theme = \"themes/{}/{}\".format(theme_name, values['filename'])\n if filename_with_theme in static_files:\n values['filename'] = filename_with_theme\n return url_for(endpoint, **values)\n\n\ndef proxify(url):\n if url.startswith('//'):\n url = 'https:' + url\n\n if not settings.get('result_proxy'):\n return url\n\n url_params = dict(mortyurl=url.encode('utf-8'))\n\n if settings['result_proxy'].get('key'):\n url_params['mortyhash'] = hmac.new(settings['result_proxy']['key'],\n url.encode('utf-8'),\n hashlib.sha256).hexdigest()\n\n return '{0}?{1}'.format(settings['result_proxy']['url'],\n urlencode(url_params))\n\n\ndef image_proxify(url):\n\n if url.startswith('//'):\n url = 'https:' + url\n\n if not request.preferences.get_value('image_proxy'):\n return url\n\n if url.startswith('data:image/'):\n # 50 is an arbitrary number to get only the beginning of the image.\n partial_base64 = url[len('data:image/'):50].split(';')\n if len(partial_base64) == 2 \\\n and partial_base64[0] in ['gif', 'png', 'jpeg', 'pjpeg', 'webp', 'tiff', 'bmp']\\\n and partial_base64[1].startswith('base64,'):\n return url\n else:\n return None\n\n if settings.get('result_proxy'):\n return proxify(url)\n\n h = new_hmac(settings['server']['secret_key'], url.encode('utf-8'))\n\n return '{0}?{1}'.format(url_for('image_proxy'),\n urlencode(dict(url=url.encode('utf-8'), h=h)))\n\n\ndef render(template_name, override_theme=None, **kwargs):\n disabled_engines = request.preferences.engines.get_disabled()\n\n enabled_categories = 
set(category for engine_name in engines\n for category in engines[engine_name].categories\n if (engine_name, category) not in disabled_engines)\n\n if 'categories' not in kwargs:\n kwargs['categories'] = [x for x in\n _get_ordered_categories()\n if x in enabled_categories]\n\n if 'all_categories' not in kwargs:\n kwargs['all_categories'] = _get_ordered_categories()\n\n if 'selected_categories' not in kwargs:\n kwargs['selected_categories'] = []\n for arg in request.args:\n if arg.startswith('category_'):\n c = arg.split('_', 1)[1]\n if c in categories:\n kwargs['selected_categories'].append(c)\n\n if not kwargs['selected_categories']:\n cookie_categories = request.preferences.get_value('categories')\n for ccateg in cookie_categories:\n kwargs['selected_categories'].append(ccateg)\n\n if not kwargs['selected_categories']:\n kwargs['selected_categories'] = ['general']\n\n if 'autocomplete' not in kwargs:\n kwargs['autocomplete'] = request.preferences.get_value('autocomplete')\n\n locale = request.preferences.get_value('locale')\n\n if locale in rtl_locales and 'rtl' not in kwargs:\n kwargs['rtl'] = True\n\n kwargs['searx_version'] = VERSION_STRING\n\n kwargs['method'] = request.preferences.get_value('method')\n\n kwargs['safesearch'] = str(request.preferences.get_value('safesearch'))\n\n kwargs['language_codes'] = languages\n if 'current_language' not in kwargs:\n kwargs['current_language'] = match_language(request.preferences.get_value('language'),\n LANGUAGE_CODES)\n\n # override url_for function in templates\n kwargs['url_for'] = url_for_theme\n\n kwargs['image_proxify'] = image_proxify\n\n kwargs['proxify'] = proxify if settings.get('result_proxy', {}).get('url') else None\n\n kwargs['get_result_template'] = get_result_template\n\n kwargs['theme'] = get_current_theme_name(override=override_theme)\n\n kwargs['template_name'] = template_name\n\n kwargs['cookies'] = request.cookies\n\n kwargs['errors'] = request.errors\n\n kwargs['instance_name'] = settings['general']['instance_name']\n\n kwargs['results_on_new_tab'] = request.preferences.get_value('results_on_new_tab')\n\n kwargs['unicode'] = unicode\n\n kwargs['preferences'] = request.preferences\n\n kwargs['brand'] = brand\n\n kwargs['scripts'] = set()\n kwargs['endpoint'] = 'results' if 'q' in kwargs else request.endpoint\n for plugin in request.user_plugins:\n for script in plugin.js_dependencies:\n kwargs['scripts'].add(script)\n\n kwargs['styles'] = set()\n for plugin in request.user_plugins:\n for css in plugin.css_dependencies:\n kwargs['styles'].add(css)\n\n return render_template(\n '{}/{}'.format(kwargs['theme'], template_name), **kwargs)\n\n\ndef _get_ordered_categories():\n ordered_categories = []\n if 'categories_order' not in settings['ui']:\n ordered_categories = ['general']\n ordered_categories.extend(x for x in sorted(categories.keys()) if x != 'general')\n return ordered_categories\n ordered_categories = settings['ui']['categories_order']\n ordered_categories.extend(x for x in sorted(categories.keys()) if x not in ordered_categories)\n return ordered_categories\n\n\[email protected]_request\ndef pre_request():\n request.start_time = time()\n request.timings = []\n request.errors = []\n\n preferences = Preferences(themes, list(categories.keys()), engines, plugins)\n request.preferences = preferences\n try:\n preferences.parse_dict(request.cookies)\n except:\n request.errors.append(gettext('Invalid settings, please edit your preferences'))\n\n # merge GET, POST vars\n # request.form\n request.form = 
dict(request.form.items())\n for k, v in request.args.items():\n if k not in request.form:\n request.form[k] = v\n\n if request.form.get('preferences'):\n preferences.parse_encoded_data(request.form['preferences'])\n else:\n try:\n preferences.parse_dict(request.form)\n except Exception as e:\n logger.exception('invalid settings')\n request.errors.append(gettext('Invalid settings'))\n\n # init search language and locale\n if not preferences.get_value(\"language\"):\n preferences.parse_dict({\"language\": _get_browser_language(request, LANGUAGE_CODES)})\n if not preferences.get_value(\"locale\"):\n preferences.parse_dict({\"locale\": get_locale()})\n\n # request.user_plugins\n request.user_plugins = []\n allowed_plugins = preferences.plugins.get_enabled()\n disabled_plugins = preferences.plugins.get_disabled()\n for plugin in plugins:\n if ((plugin.default_on and plugin.id not in disabled_plugins)\n or plugin.id in allowed_plugins):\n request.user_plugins.append(plugin)\n\n\[email protected]_request\ndef post_request(response):\n total_time = time() - request.start_time\n timings_all = ['total;dur=' + str(round(total_time * 1000, 3))]\n if len(request.timings) > 0:\n timings = sorted(request.timings, key=lambda v: v['total'])\n timings_total = ['total_' + str(i) + '_' + v['engine'] +\n ';dur=' + str(round(v['total'] * 1000, 3)) for i, v in enumerate(timings)]\n timings_load = ['load_' + str(i) + '_' + v['engine'] +\n ';dur=' + str(round(v['load'] * 1000, 3)) for i, v in enumerate(timings)]\n timings_all = timings_all + timings_total + timings_load\n response.headers.add('Server-Timing', ', '.join(timings_all))\n return response\n\n\ndef index_error(output_format, error_message):\n if output_format == 'json':\n return Response(json.dumps({'error': error_message}),\n mimetype='application/json')\n elif output_format == 'csv':\n response = Response('', mimetype='application/csv')\n cont_disp = 'attachment;Filename=searx.csv'\n response.headers.add('Content-Disposition', cont_disp)\n return response\n elif output_format == 'rss':\n response_rss = render(\n 'opensearch_response_rss.xml',\n results=[],\n q=request.form['q'] if 'q' in request.form else '',\n number_of_results=0,\n base_url=get_base_url(),\n error_message=error_message,\n override_theme='__common__',\n )\n return Response(response_rss, mimetype='text/xml')\n else:\n # html\n request.errors.append(gettext('search error'))\n return render(\n 'index.html',\n )\n\n\[email protected]('/search', methods=['GET', 'POST'])\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n \"\"\"Render index page.\n\n Supported outputs: html, json, csv, rss.\n \"\"\"\n\n # output_format\n output_format = request.form.get('format', 'html')\n if output_format not in ['html', 'csv', 'json', 'rss']:\n output_format = 'html'\n\n # check if there is query\n if request.form.get('q') is None:\n if output_format == 'html':\n return render(\n 'index.html',\n )\n else:\n return index_error(output_format, 'No query'), 400\n\n # search\n search_query = None\n raw_text_query = None\n result_container = None\n try:\n search_query, raw_text_query = get_search_query_from_webapp(request.preferences, request.form)\n # search = Search(search_query) # without plugins\n search = SearchWithPlugins(search_query, request.user_plugins, request)\n\n result_container = search.search()\n\n except Exception as e:\n # log exception\n logger.exception('search error')\n\n # is it an invalid input parameter or something else ?\n if (issubclass(e.__class__, 
SearxParameterException)):\n return index_error(output_format, e.message), 400\n else:\n return index_error(output_format, gettext('search error')), 500\n\n # results\n results = result_container.get_ordered_results()\n number_of_results = result_container.results_number()\n if number_of_results < result_container.results_length():\n number_of_results = 0\n\n # checkin for a external bang\n if result_container.redirect_url:\n return redirect(result_container.redirect_url)\n\n # UI\n advanced_search = request.form.get('advanced_search', None)\n\n # Server-Timing header\n request.timings = result_container.get_timings()\n\n # output\n for result in results:\n if output_format == 'html':\n if 'content' in result and result['content']:\n result['content'] = highlight_content(escape(result['content'][:1024]), search_query.query)\n if 'title' in result and result['title']:\n result['title'] = highlight_content(escape(result['title'] or u''), search_query.query)\n else:\n if result.get('content'):\n result['content'] = html_to_text(result['content']).strip()\n # removing html content and whitespace duplications\n result['title'] = ' '.join(html_to_text(result['title']).strip().split())\n\n if 'url' in result:\n result['pretty_url'] = prettify_url(result['url'])\n\n # TODO, check if timezone is calculated right\n if 'publishedDate' in result:\n try: # test if publishedDate >= 1900 (datetime module bug)\n result['pubdate'] = result['publishedDate'].strftime('%Y-%m-%d %H:%M:%S%z')\n except ValueError:\n result['publishedDate'] = None\n else:\n if result['publishedDate'].replace(tzinfo=None) >= datetime.now() - timedelta(days=1):\n timedifference = datetime.now() - result['publishedDate'].replace(tzinfo=None)\n minutes = int((timedifference.seconds / 60) % 60)\n hours = int(timedifference.seconds / 60 / 60)\n if hours == 0:\n result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)\n else:\n result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes) # noqa\n else:\n result['publishedDate'] = format_date(result['publishedDate'])\n\n if output_format == 'json':\n return Response(json.dumps({'query': search_query.query.decode('utf-8'),\n 'number_of_results': number_of_results,\n 'results': results,\n 'answers': list(result_container.answers),\n 'corrections': list(result_container.corrections),\n 'infoboxes': result_container.infoboxes,\n 'suggestions': list(result_container.suggestions),\n 'unresponsive_engines': __get_translated_errors(result_container.unresponsive_engines)}, # noqa\n default=lambda item: list(item) if isinstance(item, set) else item),\n mimetype='application/json')\n elif output_format == 'csv':\n csv = UnicodeWriter(StringIO())\n keys = ('title', 'url', 'content', 'host', 'engine', 'score', 'type')\n csv.writerow(keys)\n for row in results:\n row['host'] = row['parsed_url'].netloc\n row['type'] = 'result'\n csv.writerow([row.get(key, '') for key in keys])\n for a in result_container.answers:\n row = {'title': a, 'type': 'answer'}\n csv.writerow([row.get(key, '') for key in keys])\n for a in result_container.suggestions:\n row = {'title': a, 'type': 'suggestion'}\n csv.writerow([row.get(key, '') for key in keys])\n for a in result_container.corrections:\n row = {'title': a, 'type': 'correction'}\n csv.writerow([row.get(key, '') for key in keys])\n csv.stream.seek(0)\n response = Response(csv.stream.read(), mimetype='application/csv')\n cont_disp = 
'attachment;Filename=searx_-_{0}.csv'.format(search_query.query.decode('utf-8'))\n response.headers.add('Content-Disposition', cont_disp)\n return response\n\n elif output_format == 'rss':\n response_rss = render(\n 'opensearch_response_rss.xml',\n results=results,\n answers=result_container.answers,\n corrections=result_container.corrections,\n suggestions=result_container.suggestions,\n q=request.form['q'],\n number_of_results=number_of_results,\n base_url=get_base_url(),\n override_theme='__common__',\n )\n return Response(response_rss, mimetype='text/xml')\n\n # HTML output format\n\n # suggestions: use RawTextQuery to get the suggestion URLs with the same bang\n suggestion_urls = list(map(lambda suggestion: {\n 'url': raw_text_query.changeSearchQuery(suggestion).getFullQuery(),\n 'title': suggestion\n },\n result_container.suggestions))\n\n correction_urls = list(map(lambda correction: {\n 'url': raw_text_query.changeSearchQuery(correction).getFullQuery(),\n 'title': correction\n },\n result_container.corrections))\n #\n return render(\n 'results.html',\n results=results,\n q=request.form['q'],\n selected_categories=search_query.categories,\n pageno=search_query.pageno,\n time_range=search_query.time_range,\n number_of_results=format_decimal(number_of_results),\n advanced_search=advanced_search,\n suggestions=suggestion_urls,\n answers=result_container.answers,\n corrections=correction_urls,\n infoboxes=result_container.infoboxes,\n paging=result_container.paging,\n unresponsive_engines=__get_translated_errors(result_container.unresponsive_engines),\n current_language=match_language(search_query.lang,\n LANGUAGE_CODES,\n fallback=request.preferences.get_value(\"language\")),\n base_url=get_base_url(),\n theme=get_current_theme_name(),\n favicons=global_favicons[themes.index(get_current_theme_name())],\n timeout_limit=request.form.get('timeout_limit', None)\n )\n\n\ndef __get_translated_errors(unresponsive_engines):\n translated_errors = []\n for unresponsive_engine in unresponsive_engines:\n error_msg = gettext(unresponsive_engine[1])\n if unresponsive_engine[2]:\n error_msg = \"{} {}\".format(error_msg, unresponsive_engine[2])\n translated_errors.append((unresponsive_engine[0], error_msg))\n return translated_errors\n\n\[email protected]('/about', methods=['GET'])\ndef about():\n \"\"\"Render about page\"\"\"\n return render(\n 'about.html',\n )\n\n\[email protected]('/autocompleter', methods=['GET', 'POST'])\ndef autocompleter():\n \"\"\"Return autocompleter results\"\"\"\n\n # set blocked engines\n disabled_engines = request.preferences.engines.get_disabled()\n\n # parse query\n if PY3:\n raw_text_query = RawTextQuery(request.form.get('q', b''), disabled_engines)\n else:\n raw_text_query = RawTextQuery(request.form.get('q', u'').encode('utf-8'), disabled_engines)\n raw_text_query.parse_query()\n\n # check if search query is set\n if not raw_text_query.getSearchQuery():\n return '', 400\n\n # run autocompleter\n completer = autocomplete_backends.get(request.preferences.get_value('autocomplete'))\n\n # parse searx specific autocompleter results like !bang\n raw_results = searx_bang(raw_text_query)\n\n # normal autocompletion results only appear if no inner results returned\n # and there is a query part besides the engine and language bangs\n if len(raw_results) == 0 and completer and (len(raw_text_query.query_parts) > 1 or\n (len(raw_text_query.languages) == 0 and\n not raw_text_query.specific)):\n # get language from cookie\n language = request.preferences.get_value('language')\n if 
not language or language == 'all':\n language = 'en'\n else:\n language = language.split('-')[0]\n # run autocompletion\n raw_results.extend(completer(raw_text_query.getSearchQuery(), language))\n\n # parse results (write :language and !engine back to result string)\n results = []\n for result in raw_results:\n raw_text_query.changeSearchQuery(result)\n\n # add parsed result\n results.append(raw_text_query.getFullQuery())\n\n # return autocompleter results\n if request.headers.get('X-Requested-With') == 'XMLHttpRequest':\n return Response(json.dumps(results),\n mimetype='application/json')\n\n return Response(json.dumps([raw_text_query.query, results]),\n mimetype='application/x-suggestions+json')\n\n\[email protected]('/preferences', methods=['GET', 'POST'])\ndef preferences():\n \"\"\"Render preferences page && save user preferences\"\"\"\n\n # save preferences\n if request.method == 'POST':\n resp = make_response(redirect(urljoin(settings['server']['base_url'], url_for('index'))))\n try:\n request.preferences.parse_form(request.form)\n except ValidationException:\n request.errors.append(gettext('Invalid settings, please edit your preferences'))\n return resp\n return request.preferences.save(resp)\n\n # render preferences\n image_proxy = request.preferences.get_value('image_proxy')\n lang = request.preferences.get_value('language')\n disabled_engines = request.preferences.engines.get_disabled()\n allowed_plugins = request.preferences.plugins.get_enabled()\n\n # stats for preferences page\n stats = {}\n\n engines_by_category = {}\n for c in categories:\n engines_by_category[c] = []\n for e in categories[c]:\n if not request.preferences.validate_token(e):\n continue\n\n stats[e.name] = {'time': None,\n 'warn_timeout': False,\n 'warn_time': False}\n if e.timeout > settings['outgoing']['request_timeout']:\n stats[e.name]['warn_timeout'] = True\n stats[e.name]['supports_selected_language'] = _is_selected_language_supported(e, request.preferences)\n\n engines_by_category[c].append(e)\n\n # get first element [0], the engine time,\n # and then the second element [1] : the time (the first one is the label)\n for engine_stat in get_engines_stats(request.preferences)[0][1]:\n stats[engine_stat.get('name')]['time'] = round(engine_stat.get('avg'), 3)\n if engine_stat.get('avg') > settings['outgoing']['request_timeout']:\n stats[engine_stat.get('name')]['warn_time'] = True\n # end of stats\n\n return render('preferences.html',\n locales=settings['locales'],\n current_locale=request.preferences.get_value(\"locale\"),\n image_proxy=image_proxy,\n engines_by_category=engines_by_category,\n stats=stats,\n answerers=[{'info': a.self_info(), 'keywords': a.keywords} for a in answerers],\n disabled_engines=disabled_engines,\n autocomplete_backends=autocomplete_backends,\n shortcuts={y: x for x, y in engine_shortcuts.items()},\n themes=themes,\n plugins=plugins,\n doi_resolvers=settings['doi_resolvers'],\n current_doi_resolver=get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')),\n allowed_plugins=allowed_plugins,\n theme=get_current_theme_name(),\n preferences_url_params=request.preferences.get_as_url_params(),\n base_url=get_base_url(),\n preferences=True)\n\n\ndef _is_selected_language_supported(engine, preferences):\n language = preferences.get_value('language')\n return (language == 'all'\n or match_language(language,\n getattr(engine, 'supported_languages', []),\n getattr(engine, 'language_aliases', {}), None))\n\n\[email protected]('/image_proxy', methods=['GET'])\ndef 
image_proxy():\n url = request.args.get('url').encode('utf-8')\n\n if not url:\n return '', 400\n\n h = new_hmac(settings['server']['secret_key'], url)\n\n if h != request.args.get('h'):\n return '', 400\n\n headers = dict_subset(request.headers, {'If-Modified-Since', 'If-None-Match'})\n headers['User-Agent'] = gen_useragent()\n\n resp = requests.get(url,\n stream=True,\n timeout=settings['outgoing']['request_timeout'],\n headers=headers,\n proxies=outgoing_proxies)\n\n if resp.status_code == 304:\n return '', resp.status_code\n\n if resp.status_code != 200:\n logger.debug('image-proxy: wrong response code: {0}'.format(resp.status_code))\n if resp.status_code >= 400:\n return '', resp.status_code\n return '', 400\n\n if not resp.headers.get('content-type', '').startswith('image/'):\n logger.debug('image-proxy: wrong content-type: {0}'.format(resp.headers.get('content-type')))\n return '', 400\n\n img = b''\n chunk_counter = 0\n\n for chunk in resp.iter_content(1024 * 1024):\n chunk_counter += 1\n if chunk_counter > 5:\n return '', 502 # Bad gateway - file is too big (>5M)\n img += chunk\n\n headers = dict_subset(resp.headers, {'Content-Length', 'Length', 'Date', 'Last-Modified', 'Expires', 'Etag'})\n\n return Response(img, mimetype=resp.headers['content-type'], headers=headers)\n\n\[email protected]('/stats', methods=['GET'])\ndef stats():\n \"\"\"Render engine statistics page.\"\"\"\n stats = get_engines_stats(request.preferences)\n return render(\n 'stats.html',\n stats=stats,\n )\n\n\[email protected]('/robots.txt', methods=['GET'])\ndef robots():\n return Response(\"\"\"User-agent: *\nAllow: /\nAllow: /about\nDisallow: /stats\nDisallow: /preferences\nDisallow: /*?*q=*\n\"\"\", mimetype='text/plain')\n\n\[email protected]('/opensearch.xml', methods=['GET'])\ndef opensearch():\n method = 'post'\n\n if request.preferences.get_value('method') == 'GET':\n method = 'get'\n\n # chrome/chromium only supports HTTP GET....\n if request.headers.get('User-Agent', '').lower().find('webkit') >= 0:\n method = 'get'\n\n ret = render('opensearch.xml',\n opensearch_method=method,\n host=get_base_url(),\n urljoin=urljoin,\n override_theme='__common__')\n\n resp = Response(response=ret,\n status=200,\n mimetype=\"application/opensearchdescription+xml\")\n return resp\n\n\[email protected]('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path,\n static_path,\n 'themes',\n get_current_theme_name(),\n 'img'),\n 'favicon.png',\n mimetype='image/vnd.microsoft.icon')\n\n\[email protected]('/clear_cookies')\ndef clear_cookies():\n resp = make_response(redirect(urljoin(settings['server']['base_url'], url_for('index'))))\n for cookie_name in request.cookies:\n resp.delete_cookie(cookie_name)\n return resp\n\n\[email protected]('/config')\ndef config():\n \"\"\"Return configuration in JSON format.\"\"\"\n _engines = []\n for name, engine in engines.items():\n if not request.preferences.validate_token(engine):\n continue\n\n supported_languages = engine.supported_languages\n if isinstance(engine.supported_languages, dict):\n supported_languages = list(engine.supported_languages.keys())\n\n _engines.append({\n 'name': name,\n 'categories': engine.categories,\n 'shortcut': engine.shortcut,\n 'enabled': not engine.disabled,\n 'paging': engine.paging,\n 'language_support': engine.language_support,\n 'supported_languages': supported_languages,\n 'safesearch': engine.safesearch,\n 'time_range_support': engine.time_range_support,\n 'timeout': engine.timeout\n })\n\n _plugins = []\n for _ 
in plugins:\n _plugins.append({'name': _.name, 'enabled': _.default_on})\n\n return jsonify({\n 'categories': list(categories.keys()),\n 'engines': _engines,\n 'plugins': _plugins,\n 'instance_name': settings['general']['instance_name'],\n 'locales': settings['locales'],\n 'default_locale': settings['ui']['default_locale'],\n 'autocomplete': settings['search']['autocomplete'],\n 'safe_search': settings['search']['safe_search'],\n 'default_theme': settings['ui']['default_theme'],\n 'version': VERSION_STRING,\n 'brand': {\n 'GIT_URL': brand.GIT_URL,\n 'DOCS_URL': brand.DOCS_URL\n },\n 'doi_resolvers': [r for r in settings['doi_resolvers']],\n 'default_doi_resolver': settings['default_doi_resolver'],\n })\n\n\[email protected]('/translations.js')\ndef js_translations():\n return render(\n 'translations.js.tpl',\n override_theme='__common__',\n ), {'Content-Type': 'text/javascript; charset=UTF-8'}\n\n\[email protected](404)\ndef page_not_found(e):\n return render('404.html'), 404\n\n\ndef run():\n logger.debug('starting webserver on %s:%s', settings['server']['bind_address'], settings['server']['port'])\n app.run(\n debug=searx_debug,\n use_debugger=searx_debug,\n port=settings['server']['port'],\n host=settings['server']['bind_address'],\n threaded=True\n )\n\n\nclass ReverseProxyPathFix(object):\n '''Wrap the application in this middleware and configure the\n front-end server to add these headers, to let you quietly bind\n this to a URL other than / and to an HTTP scheme that is\n different than what is used locally.\n\n http://flask.pocoo.org/snippets/35/\n\n In nginx:\n location /myprefix {\n proxy_pass http://127.0.0.1:8000;\n proxy_set_header Host $host;\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n proxy_set_header X-Scheme $scheme;\n proxy_set_header X-Script-Name /myprefix;\n }\n\n :param app: the WSGI application\n '''\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n script_name = environ.get('HTTP_X_SCRIPT_NAME', '')\n if script_name:\n environ['SCRIPT_NAME'] = script_name\n path_info = environ['PATH_INFO']\n if path_info.startswith(script_name):\n environ['PATH_INFO'] = path_info[len(script_name):]\n\n scheme = environ.get('HTTP_X_SCHEME', '')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n return self.app(environ, start_response)\n\n\napplication = app\n# patch app to handle non root url-s behind proxy & wsgi\napp.wsgi_app = ReverseProxyPathFix(ProxyFix(application.wsgi_app))\n\nif __name__ == \"__main__\":\n run()\n", "path": "searx/webapp.py" } ]
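The record's after_files entry follows. In the portion shown, it is identical to before_files except for a few lines added in pre_request(); for quick reference, the added logic (reproduced verbatim from the after_files content, surrounding lines elided) forces the GET search method when the client is an Android WebKit browser:

    # inside pre_request(), after constructing Preferences (searx/webapp.py, after_files)
    user_agent = request.headers.get('User-Agent', '').lower()
    if 'webkit' in user_agent and 'android' in user_agent:
        preferences.key_value_settings['method'].value = 'GET'

This appears to mirror the existing workaround in opensearch(), which the file itself annotates with "chrome/chromium only supports HTTP GET".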
[ { "content": "#!/usr/bin/env python\n\n'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2013- by Adam Tauber, <[email protected]>\n'''\n\nif __name__ == '__main__':\n from sys import path\n from os.path import realpath, dirname\n path.append(realpath(dirname(realpath(__file__)) + '/../'))\n\nimport hashlib\nimport hmac\nimport json\nimport os\nimport sys\n\nimport requests\n\nfrom searx import logger\nlogger = logger.getChild('webapp')\n\ntry:\n from pygments import highlight\n from pygments.lexers import get_lexer_by_name\n from pygments.formatters import HtmlFormatter\nexcept:\n logger.critical(\"cannot import dependency: pygments\")\n from sys import exit\n exit(1)\ntry:\n from cgi import escape\nexcept:\n from html import escape\nfrom six import next\nfrom datetime import datetime, timedelta\nfrom time import time\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom flask import (\n Flask, request, render_template, url_for, Response, make_response,\n redirect, send_from_directory\n)\nfrom babel.support import Translations\nimport flask_babel\nfrom flask_babel import Babel, gettext, format_date, format_decimal\nfrom flask.ctx import has_request_context\nfrom flask.json import jsonify\nfrom searx import brand, static_path\nfrom searx import settings, searx_dir, searx_debug\nfrom searx.exceptions import SearxParameterException\nfrom searx.engines import (\n categories, engines, engine_shortcuts, get_engines_stats, initialize_engines\n)\nfrom searx.utils import (\n UnicodeWriter, highlight_content, html_to_text, get_resources_directory,\n get_static_files, get_result_templates, get_themes, gen_useragent,\n dict_subset, prettify_url, match_language\n)\nfrom searx.version import VERSION_STRING\nfrom searx.languages import language_codes as languages\nfrom searx.search import SearchWithPlugins, get_search_query_from_webapp\nfrom searx.query import RawTextQuery\nfrom searx.autocomplete import searx_bang, backends as autocomplete_backends\nfrom searx.plugins import plugins\nfrom searx.plugins.oa_doi_rewrite import get_doi_resolver\nfrom searx.preferences import Preferences, ValidationException, LANGUAGE_CODES\nfrom searx.answerers import answerers\nfrom searx.url_utils import urlencode, urlparse, urljoin\nfrom searx.utils import new_hmac\n\n# check if the pyopenssl package is installed.\n# It is needed for SSL connection without trouble, see #298\ntry:\n import OpenSSL.SSL # NOQA\nexcept ImportError:\n logger.critical(\"The pyopenssl package has to be installed.\\n\"\n \"Some HTTPS connections will fail\")\n\ntry:\n from cStringIO import StringIO\nexcept:\n from io import StringIO\n\n\nif sys.version_info[0] == 3:\n unicode = str\n PY3 = True\nelse:\n logger.warning('\\033[1;31m Python2 is no longer supported\\033[0m')\n exit(1)\n\n# serve pages with HTTP/1.1\nfrom werkzeug.serving import WSGIRequestHandler\nWSGIRequestHandler.protocol_version = \"HTTP/{}\".format(settings['server'].get('http_protocol_version', 
'1.0'))\n\n# about static\nstatic_path = get_resources_directory(searx_dir, 'static', settings['ui']['static_path'])\nlogger.debug('static directory is %s', static_path)\nstatic_files = get_static_files(static_path)\n\n# about templates\ndefault_theme = settings['ui']['default_theme']\ntemplates_path = get_resources_directory(searx_dir, 'templates', settings['ui']['templates_path'])\nlogger.debug('templates directory is %s', templates_path)\nthemes = get_themes(templates_path)\nresult_templates = get_result_templates(templates_path)\nglobal_favicons = []\nfor indice, theme in enumerate(themes):\n global_favicons.append([])\n theme_img_path = os.path.join(static_path, 'themes', theme, 'img', 'icons')\n for (dirpath, dirnames, filenames) in os.walk(theme_img_path):\n global_favicons[indice].extend(filenames)\n\n# Flask app\napp = Flask(\n __name__,\n static_folder=static_path,\n template_folder=templates_path\n)\n\napp.jinja_env.trim_blocks = True\napp.jinja_env.lstrip_blocks = True\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\napp.secret_key = settings['server']['secret_key']\n\nif not searx_debug \\\n or os.environ.get(\"WERKZEUG_RUN_MAIN\") == \"true\" \\\n or os.environ.get('UWSGI_ORIGINAL_PROC_NAME') is not None:\n initialize_engines(settings['engines'])\n\nbabel = Babel(app)\n\nrtl_locales = ['ar', 'arc', 'bcc', 'bqi', 'ckb', 'dv', 'fa', 'fa_IR', 'glk', 'he',\n 'ku', 'mzn', 'pnb', 'ps', 'sd', 'ug', 'ur', 'yi']\n\n# used when translating category names\n_category_names = (gettext('files'),\n gettext('general'),\n gettext('music'),\n gettext('social media'),\n gettext('images'),\n gettext('videos'),\n gettext('it'),\n gettext('news'),\n gettext('map'),\n gettext('science'))\n\noutgoing_proxies = settings['outgoing'].get('proxies') or None\n\n_flask_babel_get_translations = flask_babel.get_translations\n\n\n# monkey patch for flask_babel.get_translations\ndef _get_translations():\n if has_request_context() and request.form.get('use-translation') == 'oc':\n babel_ext = flask_babel.current_app.extensions['babel']\n return Translations.load(next(babel_ext.translation_directories), 'oc')\n\n return _flask_babel_get_translations()\n\n\nflask_babel.get_translations = _get_translations\n\n\ndef _get_browser_language(request, lang_list):\n for lang in request.headers.get(\"Accept-Language\", \"en\").split(\",\"):\n if ';' in lang:\n lang = lang.split(';')[0]\n locale = match_language(lang, lang_list, fallback=None)\n if locale is not None:\n return locale\n return settings['search']['default_lang'] or 'en'\n\n\[email protected]\ndef get_locale():\n locale = _get_browser_language(request, settings['locales'].keys())\n\n logger.debug(\"default locale from browser info is `%s`\", locale)\n\n if request.preferences.get_value('locale') != '':\n locale = request.preferences.get_value('locale')\n\n if 'locale' in request.form\\\n and request.form['locale'] in settings['locales']:\n locale = request.form['locale']\n\n if locale == 'zh_TW':\n locale = 'zh_Hant_TW'\n\n if locale == 'oc':\n request.form['use-translation'] = 'oc'\n locale = 'fr_FR'\n\n logger.debug(\"selected locale is `%s`\", locale)\n\n return locale\n\n\n# code-highlighter\[email protected]_filter('code_highlighter')\ndef code_highlighter(codelines, language=None):\n if not language:\n language = 'text'\n\n try:\n # find lexer by programing language\n lexer = get_lexer_by_name(language, stripall=True)\n except:\n # if lexer is not found, using default one\n logger.debug('highlighter cannot find lexer for 
{0}'.format(language))\n lexer = get_lexer_by_name('text', stripall=True)\n\n html_code = ''\n tmp_code = ''\n last_line = None\n\n # parse lines\n for line, code in codelines:\n if not last_line:\n line_code_start = line\n\n # new codeblock is detected\n if last_line is not None and\\\n last_line + 1 != line:\n\n # highlight last codepart\n formatter = HtmlFormatter(linenos='inline',\n linenostart=line_code_start)\n html_code = html_code + highlight(tmp_code, lexer, formatter)\n\n # reset conditions for next codepart\n tmp_code = ''\n line_code_start = line\n\n # add codepart\n tmp_code += code + '\\n'\n\n # update line\n last_line = line\n\n # highlight last codepart\n formatter = HtmlFormatter(linenos='inline', linenostart=line_code_start)\n html_code = html_code + highlight(tmp_code, lexer, formatter)\n\n return html_code\n\n\n# Extract domain from url\[email protected]_filter('extract_domain')\ndef extract_domain(url):\n return urlparse(url)[1]\n\n\ndef get_base_url():\n if settings['server']['base_url']:\n hostname = settings['server']['base_url']\n else:\n scheme = 'http'\n if request.is_secure:\n scheme = 'https'\n hostname = url_for('index', _external=True, _scheme=scheme)\n return hostname\n\n\ndef get_current_theme_name(override=None):\n \"\"\"Returns theme name.\n\n Checks in this order:\n 1. override\n 2. cookies\n 3. settings\"\"\"\n\n if override and (override in themes or override == '__common__'):\n return override\n theme_name = request.args.get('theme', request.preferences.get_value('theme'))\n if theme_name not in themes:\n theme_name = default_theme\n return theme_name\n\n\ndef get_result_template(theme, template_name):\n themed_path = theme + '/result_templates/' + template_name\n if themed_path in result_templates:\n return themed_path\n return 'result_templates/' + template_name\n\n\ndef url_for_theme(endpoint, override_theme=None, **values):\n if endpoint == 'static' and values.get('filename'):\n theme_name = get_current_theme_name(override=override_theme)\n filename_with_theme = \"themes/{}/{}\".format(theme_name, values['filename'])\n if filename_with_theme in static_files:\n values['filename'] = filename_with_theme\n return url_for(endpoint, **values)\n\n\ndef proxify(url):\n if url.startswith('//'):\n url = 'https:' + url\n\n if not settings.get('result_proxy'):\n return url\n\n url_params = dict(mortyurl=url.encode('utf-8'))\n\n if settings['result_proxy'].get('key'):\n url_params['mortyhash'] = hmac.new(settings['result_proxy']['key'],\n url.encode('utf-8'),\n hashlib.sha256).hexdigest()\n\n return '{0}?{1}'.format(settings['result_proxy']['url'],\n urlencode(url_params))\n\n\ndef image_proxify(url):\n\n if url.startswith('//'):\n url = 'https:' + url\n\n if not request.preferences.get_value('image_proxy'):\n return url\n\n if url.startswith('data:image/'):\n # 50 is an arbitrary number to get only the beginning of the image.\n partial_base64 = url[len('data:image/'):50].split(';')\n if len(partial_base64) == 2 \\\n and partial_base64[0] in ['gif', 'png', 'jpeg', 'pjpeg', 'webp', 'tiff', 'bmp']\\\n and partial_base64[1].startswith('base64,'):\n return url\n else:\n return None\n\n if settings.get('result_proxy'):\n return proxify(url)\n\n h = new_hmac(settings['server']['secret_key'], url.encode('utf-8'))\n\n return '{0}?{1}'.format(url_for('image_proxy'),\n urlencode(dict(url=url.encode('utf-8'), h=h)))\n\n\ndef render(template_name, override_theme=None, **kwargs):\n disabled_engines = request.preferences.engines.get_disabled()\n\n enabled_categories = 
set(category for engine_name in engines\n for category in engines[engine_name].categories\n if (engine_name, category) not in disabled_engines)\n\n if 'categories' not in kwargs:\n kwargs['categories'] = [x for x in\n _get_ordered_categories()\n if x in enabled_categories]\n\n if 'all_categories' not in kwargs:\n kwargs['all_categories'] = _get_ordered_categories()\n\n if 'selected_categories' not in kwargs:\n kwargs['selected_categories'] = []\n for arg in request.args:\n if arg.startswith('category_'):\n c = arg.split('_', 1)[1]\n if c in categories:\n kwargs['selected_categories'].append(c)\n\n if not kwargs['selected_categories']:\n cookie_categories = request.preferences.get_value('categories')\n for ccateg in cookie_categories:\n kwargs['selected_categories'].append(ccateg)\n\n if not kwargs['selected_categories']:\n kwargs['selected_categories'] = ['general']\n\n if 'autocomplete' not in kwargs:\n kwargs['autocomplete'] = request.preferences.get_value('autocomplete')\n\n locale = request.preferences.get_value('locale')\n\n if locale in rtl_locales and 'rtl' not in kwargs:\n kwargs['rtl'] = True\n\n kwargs['searx_version'] = VERSION_STRING\n\n kwargs['method'] = request.preferences.get_value('method')\n\n kwargs['safesearch'] = str(request.preferences.get_value('safesearch'))\n\n kwargs['language_codes'] = languages\n if 'current_language' not in kwargs:\n kwargs['current_language'] = match_language(request.preferences.get_value('language'),\n LANGUAGE_CODES)\n\n # override url_for function in templates\n kwargs['url_for'] = url_for_theme\n\n kwargs['image_proxify'] = image_proxify\n\n kwargs['proxify'] = proxify if settings.get('result_proxy', {}).get('url') else None\n\n kwargs['get_result_template'] = get_result_template\n\n kwargs['theme'] = get_current_theme_name(override=override_theme)\n\n kwargs['template_name'] = template_name\n\n kwargs['cookies'] = request.cookies\n\n kwargs['errors'] = request.errors\n\n kwargs['instance_name'] = settings['general']['instance_name']\n\n kwargs['results_on_new_tab'] = request.preferences.get_value('results_on_new_tab')\n\n kwargs['unicode'] = unicode\n\n kwargs['preferences'] = request.preferences\n\n kwargs['brand'] = brand\n\n kwargs['scripts'] = set()\n kwargs['endpoint'] = 'results' if 'q' in kwargs else request.endpoint\n for plugin in request.user_plugins:\n for script in plugin.js_dependencies:\n kwargs['scripts'].add(script)\n\n kwargs['styles'] = set()\n for plugin in request.user_plugins:\n for css in plugin.css_dependencies:\n kwargs['styles'].add(css)\n\n return render_template(\n '{}/{}'.format(kwargs['theme'], template_name), **kwargs)\n\n\ndef _get_ordered_categories():\n ordered_categories = []\n if 'categories_order' not in settings['ui']:\n ordered_categories = ['general']\n ordered_categories.extend(x for x in sorted(categories.keys()) if x != 'general')\n return ordered_categories\n ordered_categories = settings['ui']['categories_order']\n ordered_categories.extend(x for x in sorted(categories.keys()) if x not in ordered_categories)\n return ordered_categories\n\n\[email protected]_request\ndef pre_request():\n request.start_time = time()\n request.timings = []\n request.errors = []\n\n preferences = Preferences(themes, list(categories.keys()), engines, plugins)\n user_agent = request.headers.get('User-Agent', '').lower()\n if 'webkit' in user_agent and 'android' in user_agent:\n preferences.key_value_settings['method'].value = 'GET'\n request.preferences = preferences\n try:\n preferences.parse_dict(request.cookies)\n 
except:\n request.errors.append(gettext('Invalid settings, please edit your preferences'))\n\n # merge GET, POST vars\n # request.form\n request.form = dict(request.form.items())\n for k, v in request.args.items():\n if k not in request.form:\n request.form[k] = v\n\n if request.form.get('preferences'):\n preferences.parse_encoded_data(request.form['preferences'])\n else:\n try:\n preferences.parse_dict(request.form)\n except Exception as e:\n logger.exception('invalid settings')\n request.errors.append(gettext('Invalid settings'))\n\n # init search language and locale\n if not preferences.get_value(\"language\"):\n preferences.parse_dict({\"language\": _get_browser_language(request, LANGUAGE_CODES)})\n if not preferences.get_value(\"locale\"):\n preferences.parse_dict({\"locale\": get_locale()})\n\n # request.user_plugins\n request.user_plugins = []\n allowed_plugins = preferences.plugins.get_enabled()\n disabled_plugins = preferences.plugins.get_disabled()\n for plugin in plugins:\n if ((plugin.default_on and plugin.id not in disabled_plugins)\n or plugin.id in allowed_plugins):\n request.user_plugins.append(plugin)\n\n\[email protected]_request\ndef post_request(response):\n total_time = time() - request.start_time\n timings_all = ['total;dur=' + str(round(total_time * 1000, 3))]\n if len(request.timings) > 0:\n timings = sorted(request.timings, key=lambda v: v['total'])\n timings_total = ['total_' + str(i) + '_' + v['engine'] +\n ';dur=' + str(round(v['total'] * 1000, 3)) for i, v in enumerate(timings)]\n timings_load = ['load_' + str(i) + '_' + v['engine'] +\n ';dur=' + str(round(v['load'] * 1000, 3)) for i, v in enumerate(timings)]\n timings_all = timings_all + timings_total + timings_load\n response.headers.add('Server-Timing', ', '.join(timings_all))\n return response\n\n\ndef index_error(output_format, error_message):\n if output_format == 'json':\n return Response(json.dumps({'error': error_message}),\n mimetype='application/json')\n elif output_format == 'csv':\n response = Response('', mimetype='application/csv')\n cont_disp = 'attachment;Filename=searx.csv'\n response.headers.add('Content-Disposition', cont_disp)\n return response\n elif output_format == 'rss':\n response_rss = render(\n 'opensearch_response_rss.xml',\n results=[],\n q=request.form['q'] if 'q' in request.form else '',\n number_of_results=0,\n base_url=get_base_url(),\n error_message=error_message,\n override_theme='__common__',\n )\n return Response(response_rss, mimetype='text/xml')\n else:\n # html\n request.errors.append(gettext('search error'))\n return render(\n 'index.html',\n )\n\n\[email protected]('/search', methods=['GET', 'POST'])\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n \"\"\"Render index page.\n\n Supported outputs: html, json, csv, rss.\n \"\"\"\n\n # output_format\n output_format = request.form.get('format', 'html')\n if output_format not in ['html', 'csv', 'json', 'rss']:\n output_format = 'html'\n\n # check if there is query\n if request.form.get('q') is None:\n if output_format == 'html':\n return render(\n 'index.html',\n )\n else:\n return index_error(output_format, 'No query'), 400\n\n # search\n search_query = None\n raw_text_query = None\n result_container = None\n try:\n search_query, raw_text_query = get_search_query_from_webapp(request.preferences, request.form)\n # search = Search(search_query) # without plugins\n search = SearchWithPlugins(search_query, request.user_plugins, request)\n\n result_container = search.search()\n\n except Exception as e:\n # log 
exception\n logger.exception('search error')\n\n # is it an invalid input parameter or something else ?\n if (issubclass(e.__class__, SearxParameterException)):\n return index_error(output_format, e.message), 400\n else:\n return index_error(output_format, gettext('search error')), 500\n\n # results\n results = result_container.get_ordered_results()\n number_of_results = result_container.results_number()\n if number_of_results < result_container.results_length():\n number_of_results = 0\n\n # checkin for a external bang\n if result_container.redirect_url:\n return redirect(result_container.redirect_url)\n\n # UI\n advanced_search = request.form.get('advanced_search', None)\n\n # Server-Timing header\n request.timings = result_container.get_timings()\n\n # output\n for result in results:\n if output_format == 'html':\n if 'content' in result and result['content']:\n result['content'] = highlight_content(escape(result['content'][:1024]), search_query.query)\n if 'title' in result and result['title']:\n result['title'] = highlight_content(escape(result['title'] or u''), search_query.query)\n else:\n if result.get('content'):\n result['content'] = html_to_text(result['content']).strip()\n # removing html content and whitespace duplications\n result['title'] = ' '.join(html_to_text(result['title']).strip().split())\n\n if 'url' in result:\n result['pretty_url'] = prettify_url(result['url'])\n\n # TODO, check if timezone is calculated right\n if 'publishedDate' in result:\n try: # test if publishedDate >= 1900 (datetime module bug)\n result['pubdate'] = result['publishedDate'].strftime('%Y-%m-%d %H:%M:%S%z')\n except ValueError:\n result['publishedDate'] = None\n else:\n if result['publishedDate'].replace(tzinfo=None) >= datetime.now() - timedelta(days=1):\n timedifference = datetime.now() - result['publishedDate'].replace(tzinfo=None)\n minutes = int((timedifference.seconds / 60) % 60)\n hours = int(timedifference.seconds / 60 / 60)\n if hours == 0:\n result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)\n else:\n result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes) # noqa\n else:\n result['publishedDate'] = format_date(result['publishedDate'])\n\n if output_format == 'json':\n return Response(json.dumps({'query': search_query.query.decode('utf-8'),\n 'number_of_results': number_of_results,\n 'results': results,\n 'answers': list(result_container.answers),\n 'corrections': list(result_container.corrections),\n 'infoboxes': result_container.infoboxes,\n 'suggestions': list(result_container.suggestions),\n 'unresponsive_engines': __get_translated_errors(result_container.unresponsive_engines)}, # noqa\n default=lambda item: list(item) if isinstance(item, set) else item),\n mimetype='application/json')\n elif output_format == 'csv':\n csv = UnicodeWriter(StringIO())\n keys = ('title', 'url', 'content', 'host', 'engine', 'score', 'type')\n csv.writerow(keys)\n for row in results:\n row['host'] = row['parsed_url'].netloc\n row['type'] = 'result'\n csv.writerow([row.get(key, '') for key in keys])\n for a in result_container.answers:\n row = {'title': a, 'type': 'answer'}\n csv.writerow([row.get(key, '') for key in keys])\n for a in result_container.suggestions:\n row = {'title': a, 'type': 'suggestion'}\n csv.writerow([row.get(key, '') for key in keys])\n for a in result_container.corrections:\n row = {'title': a, 'type': 'correction'}\n csv.writerow([row.get(key, '') for key in keys])\n csv.stream.seek(0)\n 
response = Response(csv.stream.read(), mimetype='application/csv')\n cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search_query.query.decode('utf-8'))\n response.headers.add('Content-Disposition', cont_disp)\n return response\n\n elif output_format == 'rss':\n response_rss = render(\n 'opensearch_response_rss.xml',\n results=results,\n answers=result_container.answers,\n corrections=result_container.corrections,\n suggestions=result_container.suggestions,\n q=request.form['q'],\n number_of_results=number_of_results,\n base_url=get_base_url(),\n override_theme='__common__',\n )\n return Response(response_rss, mimetype='text/xml')\n\n # HTML output format\n\n # suggestions: use RawTextQuery to get the suggestion URLs with the same bang\n suggestion_urls = list(map(lambda suggestion: {\n 'url': raw_text_query.changeSearchQuery(suggestion).getFullQuery(),\n 'title': suggestion\n },\n result_container.suggestions))\n\n correction_urls = list(map(lambda correction: {\n 'url': raw_text_query.changeSearchQuery(correction).getFullQuery(),\n 'title': correction\n },\n result_container.corrections))\n #\n return render(\n 'results.html',\n results=results,\n q=request.form['q'],\n selected_categories=search_query.categories,\n pageno=search_query.pageno,\n time_range=search_query.time_range,\n number_of_results=format_decimal(number_of_results),\n advanced_search=advanced_search,\n suggestions=suggestion_urls,\n answers=result_container.answers,\n corrections=correction_urls,\n infoboxes=result_container.infoboxes,\n paging=result_container.paging,\n unresponsive_engines=__get_translated_errors(result_container.unresponsive_engines),\n current_language=match_language(search_query.lang,\n LANGUAGE_CODES,\n fallback=request.preferences.get_value(\"language\")),\n base_url=get_base_url(),\n theme=get_current_theme_name(),\n favicons=global_favicons[themes.index(get_current_theme_name())],\n timeout_limit=request.form.get('timeout_limit', None)\n )\n\n\ndef __get_translated_errors(unresponsive_engines):\n translated_errors = []\n for unresponsive_engine in unresponsive_engines:\n error_msg = gettext(unresponsive_engine[1])\n if unresponsive_engine[2]:\n error_msg = \"{} {}\".format(error_msg, unresponsive_engine[2])\n translated_errors.append((unresponsive_engine[0], error_msg))\n return translated_errors\n\n\[email protected]('/about', methods=['GET'])\ndef about():\n \"\"\"Render about page\"\"\"\n return render(\n 'about.html',\n )\n\n\[email protected]('/autocompleter', methods=['GET', 'POST'])\ndef autocompleter():\n \"\"\"Return autocompleter results\"\"\"\n\n # set blocked engines\n disabled_engines = request.preferences.engines.get_disabled()\n\n # parse query\n if PY3:\n raw_text_query = RawTextQuery(request.form.get('q', b''), disabled_engines)\n else:\n raw_text_query = RawTextQuery(request.form.get('q', u'').encode('utf-8'), disabled_engines)\n raw_text_query.parse_query()\n\n # check if search query is set\n if not raw_text_query.getSearchQuery():\n return '', 400\n\n # run autocompleter\n completer = autocomplete_backends.get(request.preferences.get_value('autocomplete'))\n\n # parse searx specific autocompleter results like !bang\n raw_results = searx_bang(raw_text_query)\n\n # normal autocompletion results only appear if no inner results returned\n # and there is a query part besides the engine and language bangs\n if len(raw_results) == 0 and completer and (len(raw_text_query.query_parts) > 1 or\n (len(raw_text_query.languages) == 0 and\n not raw_text_query.specific)):\n # get 
language from cookie\n language = request.preferences.get_value('language')\n if not language or language == 'all':\n language = 'en'\n else:\n language = language.split('-')[0]\n # run autocompletion\n raw_results.extend(completer(raw_text_query.getSearchQuery(), language))\n\n # parse results (write :language and !engine back to result string)\n results = []\n for result in raw_results:\n raw_text_query.changeSearchQuery(result)\n\n # add parsed result\n results.append(raw_text_query.getFullQuery())\n\n # return autocompleter results\n if request.headers.get('X-Requested-With') == 'XMLHttpRequest':\n return Response(json.dumps(results),\n mimetype='application/json')\n\n return Response(json.dumps([raw_text_query.query, results]),\n mimetype='application/x-suggestions+json')\n\n\[email protected]('/preferences', methods=['GET', 'POST'])\ndef preferences():\n \"\"\"Render preferences page && save user preferences\"\"\"\n\n # save preferences\n if request.method == 'POST':\n resp = make_response(redirect(urljoin(settings['server']['base_url'], url_for('index'))))\n try:\n request.preferences.parse_form(request.form)\n except ValidationException:\n request.errors.append(gettext('Invalid settings, please edit your preferences'))\n return resp\n return request.preferences.save(resp)\n\n # render preferences\n image_proxy = request.preferences.get_value('image_proxy')\n lang = request.preferences.get_value('language')\n disabled_engines = request.preferences.engines.get_disabled()\n allowed_plugins = request.preferences.plugins.get_enabled()\n\n # stats for preferences page\n stats = {}\n\n engines_by_category = {}\n for c in categories:\n engines_by_category[c] = []\n for e in categories[c]:\n if not request.preferences.validate_token(e):\n continue\n\n stats[e.name] = {'time': None,\n 'warn_timeout': False,\n 'warn_time': False}\n if e.timeout > settings['outgoing']['request_timeout']:\n stats[e.name]['warn_timeout'] = True\n stats[e.name]['supports_selected_language'] = _is_selected_language_supported(e, request.preferences)\n\n engines_by_category[c].append(e)\n\n # get first element [0], the engine time,\n # and then the second element [1] : the time (the first one is the label)\n for engine_stat in get_engines_stats(request.preferences)[0][1]:\n stats[engine_stat.get('name')]['time'] = round(engine_stat.get('avg'), 3)\n if engine_stat.get('avg') > settings['outgoing']['request_timeout']:\n stats[engine_stat.get('name')]['warn_time'] = True\n # end of stats\n\n return render('preferences.html',\n locales=settings['locales'],\n current_locale=request.preferences.get_value(\"locale\"),\n image_proxy=image_proxy,\n engines_by_category=engines_by_category,\n stats=stats,\n answerers=[{'info': a.self_info(), 'keywords': a.keywords} for a in answerers],\n disabled_engines=disabled_engines,\n autocomplete_backends=autocomplete_backends,\n shortcuts={y: x for x, y in engine_shortcuts.items()},\n themes=themes,\n plugins=plugins,\n doi_resolvers=settings['doi_resolvers'],\n current_doi_resolver=get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')),\n allowed_plugins=allowed_plugins,\n theme=get_current_theme_name(),\n preferences_url_params=request.preferences.get_as_url_params(),\n base_url=get_base_url(),\n preferences=True)\n\n\ndef _is_selected_language_supported(engine, preferences):\n language = preferences.get_value('language')\n return (language == 'all'\n or match_language(language,\n getattr(engine, 'supported_languages', []),\n getattr(engine, 'language_aliases', 
{}), None))\n\n\[email protected]('/image_proxy', methods=['GET'])\ndef image_proxy():\n url = request.args.get('url').encode('utf-8')\n\n if not url:\n return '', 400\n\n h = new_hmac(settings['server']['secret_key'], url)\n\n if h != request.args.get('h'):\n return '', 400\n\n headers = dict_subset(request.headers, {'If-Modified-Since', 'If-None-Match'})\n headers['User-Agent'] = gen_useragent()\n\n resp = requests.get(url,\n stream=True,\n timeout=settings['outgoing']['request_timeout'],\n headers=headers,\n proxies=outgoing_proxies)\n\n if resp.status_code == 304:\n return '', resp.status_code\n\n if resp.status_code != 200:\n logger.debug('image-proxy: wrong response code: {0}'.format(resp.status_code))\n if resp.status_code >= 400:\n return '', resp.status_code\n return '', 400\n\n if not resp.headers.get('content-type', '').startswith('image/'):\n logger.debug('image-proxy: wrong content-type: {0}'.format(resp.headers.get('content-type')))\n return '', 400\n\n img = b''\n chunk_counter = 0\n\n for chunk in resp.iter_content(1024 * 1024):\n chunk_counter += 1\n if chunk_counter > 5:\n return '', 502 # Bad gateway - file is too big (>5M)\n img += chunk\n\n headers = dict_subset(resp.headers, {'Content-Length', 'Length', 'Date', 'Last-Modified', 'Expires', 'Etag'})\n\n return Response(img, mimetype=resp.headers['content-type'], headers=headers)\n\n\[email protected]('/stats', methods=['GET'])\ndef stats():\n \"\"\"Render engine statistics page.\"\"\"\n stats = get_engines_stats(request.preferences)\n return render(\n 'stats.html',\n stats=stats,\n )\n\n\[email protected]('/robots.txt', methods=['GET'])\ndef robots():\n return Response(\"\"\"User-agent: *\nAllow: /\nAllow: /about\nDisallow: /stats\nDisallow: /preferences\nDisallow: /*?*q=*\n\"\"\", mimetype='text/plain')\n\n\[email protected]('/opensearch.xml', methods=['GET'])\ndef opensearch():\n method = 'post'\n\n if request.preferences.get_value('method') == 'GET':\n method = 'get'\n\n # chrome/chromium only supports HTTP GET....\n if request.headers.get('User-Agent', '').lower().find('webkit') >= 0:\n method = 'get'\n\n ret = render('opensearch.xml',\n opensearch_method=method,\n host=get_base_url(),\n urljoin=urljoin,\n override_theme='__common__')\n\n resp = Response(response=ret,\n status=200,\n mimetype=\"application/opensearchdescription+xml\")\n return resp\n\n\[email protected]('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path,\n static_path,\n 'themes',\n get_current_theme_name(),\n 'img'),\n 'favicon.png',\n mimetype='image/vnd.microsoft.icon')\n\n\[email protected]('/clear_cookies')\ndef clear_cookies():\n resp = make_response(redirect(urljoin(settings['server']['base_url'], url_for('index'))))\n for cookie_name in request.cookies:\n resp.delete_cookie(cookie_name)\n return resp\n\n\[email protected]('/config')\ndef config():\n \"\"\"Return configuration in JSON format.\"\"\"\n _engines = []\n for name, engine in engines.items():\n if not request.preferences.validate_token(engine):\n continue\n\n supported_languages = engine.supported_languages\n if isinstance(engine.supported_languages, dict):\n supported_languages = list(engine.supported_languages.keys())\n\n _engines.append({\n 'name': name,\n 'categories': engine.categories,\n 'shortcut': engine.shortcut,\n 'enabled': not engine.disabled,\n 'paging': engine.paging,\n 'language_support': engine.language_support,\n 'supported_languages': supported_languages,\n 'safesearch': engine.safesearch,\n 'time_range_support': 
engine.time_range_support,\n 'timeout': engine.timeout\n })\n\n _plugins = []\n for _ in plugins:\n _plugins.append({'name': _.name, 'enabled': _.default_on})\n\n return jsonify({\n 'categories': list(categories.keys()),\n 'engines': _engines,\n 'plugins': _plugins,\n 'instance_name': settings['general']['instance_name'],\n 'locales': settings['locales'],\n 'default_locale': settings['ui']['default_locale'],\n 'autocomplete': settings['search']['autocomplete'],\n 'safe_search': settings['search']['safe_search'],\n 'default_theme': settings['ui']['default_theme'],\n 'version': VERSION_STRING,\n 'brand': {\n 'GIT_URL': brand.GIT_URL,\n 'DOCS_URL': brand.DOCS_URL\n },\n 'doi_resolvers': [r for r in settings['doi_resolvers']],\n 'default_doi_resolver': settings['default_doi_resolver'],\n })\n\n\[email protected]('/translations.js')\ndef js_translations():\n return render(\n 'translations.js.tpl',\n override_theme='__common__',\n ), {'Content-Type': 'text/javascript; charset=UTF-8'}\n\n\[email protected](404)\ndef page_not_found(e):\n return render('404.html'), 404\n\n\ndef run():\n logger.debug('starting webserver on %s:%s', settings['server']['bind_address'], settings['server']['port'])\n app.run(\n debug=searx_debug,\n use_debugger=searx_debug,\n port=settings['server']['port'],\n host=settings['server']['bind_address'],\n threaded=True\n )\n\n\nclass ReverseProxyPathFix(object):\n '''Wrap the application in this middleware and configure the\n front-end server to add these headers, to let you quietly bind\n this to a URL other than / and to an HTTP scheme that is\n different than what is used locally.\n\n http://flask.pocoo.org/snippets/35/\n\n In nginx:\n location /myprefix {\n proxy_pass http://127.0.0.1:8000;\n proxy_set_header Host $host;\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n proxy_set_header X-Scheme $scheme;\n proxy_set_header X-Script-Name /myprefix;\n }\n\n :param app: the WSGI application\n '''\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n script_name = environ.get('HTTP_X_SCRIPT_NAME', '')\n if script_name:\n environ['SCRIPT_NAME'] = script_name\n path_info = environ['PATH_INFO']\n if path_info.startswith(script_name):\n environ['PATH_INFO'] = path_info[len(script_name):]\n\n scheme = environ.get('HTTP_X_SCHEME', '')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n return self.app(environ, start_response)\n\n\napplication = app\n# patch app to handle non root url-s behind proxy & wsgi\napp.wsgi_app = ReverseProxyPathFix(ProxyFix(application.wsgi_app))\n\nif __name__ == \"__main__\":\n run()\n", "path": "searx/webapp.py" } ]
diff --git a/searx/templates/__common__/opensearch.xml b/searx/templates/__common__/opensearch.xml index 7fdc1f7d87..244e101321 100644 --- a/searx/templates/__common__/opensearch.xml +++ b/searx/templates/__common__/opensearch.xml @@ -6,7 +6,7 @@ <Image>{{ urljoin(host, url_for('static', filename='img/favicon.png')) }}</Image> <LongName>searx metasearch</LongName> {% if opensearch_method == 'get' %} - <Url rel="results" type="text/html" method="get" template="{{ host }}search?q={searchTerms}"/> + <Url rel="results" type="text/html" method="get" template="{{ host }}?q={searchTerms}"/> {% else %} <Url rel="results" type="text/html" method="post" template="{{ host }}"> <Param name="q" value="{searchTerms}" /> diff --git a/searx/webapp.py b/searx/webapp.py index f79525d39e..4c0eceaafb 100755 --- a/searx/webapp.py +++ b/searx/webapp.py @@ -463,6 +463,9 @@ def pre_request(): request.errors = [] preferences = Preferences(themes, list(categories.keys()), engines, plugins) + user_agent = request.headers.get('User-Agent', '').lower() + if 'webkit' in user_agent and 'android' in user_agent: + preferences.key_value_settings['method'].value = 'GET' request.preferences = preferences try: preferences.parse_dict(request.cookies)
Chromium-based browsers on Android don't detect searx as a search engine
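The fix in the diff above hinges on one browser quirk: WebKit/Chromium-based browsers on Android only register an OpenSearch engine whose results URL submits the query via HTTP GET, so the patch sniffs the `User-Agent` header and overrides the preferred search method. Below is a minimal sketch of that check; `preferences` is a plain dict standing in for searx's `Preferences` object, and the function name is hypothetical:

```python
# Sketch only: a plain dict stands in for searx's Preferences object,
# and force_get_for_android_webkit is a made-up name for illustration.

def force_get_for_android_webkit(headers, preferences):
    """Force the GET search method for WebKit-based Android browsers,
    which only pick up OpenSearch engines that submit queries via GET."""
    user_agent = headers.get('User-Agent', '').lower()
    if 'webkit' in user_agent and 'android' in user_agent:
        preferences['method'] = 'GET'
    return preferences


# Chrome on Android identifies itself as both WebKit and Android:
headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 10) AppleWebKit/537.36'}
print(force_get_for_android_webkit(headers, {'method': 'POST'}))
# -> {'method': 'GET'}
```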
sanic-org__sanic-2452
[ { "content": "from __future__ import annotations\n\nfrom typing import Dict, List, Optional, Tuple, Type, Union\n\nfrom sanic.config import Config\nfrom sanic.errorpages import (\n DEFAULT_FORMAT,\n BaseRenderer,\n TextRenderer,\n exception_response,\n)\nfrom sanic.exceptions import (\n ContentRangeError,\n HeaderNotFound,\n InvalidRangeType,\n SanicException,\n)\nfrom sanic.helpers import Default, _default\nfrom sanic.log import deprecation, error_logger\nfrom sanic.models.handler_types import RouteHandler\nfrom sanic.response import text\n\n\nclass ErrorHandler:\n \"\"\"\n Provide :class:`sanic.app.Sanic` application with a mechanism to handle\n and process any and all uncaught exceptions in a way the application\n developer will set fit.\n\n This error handling framework is built into the core that can be extended\n by the developers to perform a wide range of tasks from recording the error\n stats to reporting them to an external service that can be used for\n realtime alerting system.\n\n \"\"\"\n\n def __init__(\n self,\n fallback: Union[str, Default] = _default,\n base: Type[BaseRenderer] = TextRenderer,\n ):\n self.cached_handlers: Dict[\n Tuple[Type[BaseException], Optional[str]], Optional[RouteHandler]\n ] = {}\n self.debug = False\n self._fallback = fallback\n self.base = base\n\n if fallback is not _default:\n self._warn_fallback_deprecation()\n\n @property\n def fallback(self): # no cov\n # This is for backwards compat and can be removed in v22.6\n if self._fallback is _default:\n return DEFAULT_FORMAT\n return self._fallback\n\n @fallback.setter\n def fallback(self, value: str): # no cov\n self._warn_fallback_deprecation()\n if not isinstance(value, str):\n raise SanicException(\n f\"Cannot set error handler fallback to: value={value}\"\n )\n self._fallback = value\n\n @staticmethod\n def _warn_fallback_deprecation():\n deprecation(\n \"Setting the ErrorHandler fallback value directly is \"\n \"deprecated and no longer supported. This feature will \"\n \"be removed in v22.6. Instead, use \"\n \"app.config.FALLBACK_ERROR_FORMAT.\",\n 22.6,\n )\n\n @classmethod\n def _get_fallback_value(cls, error_handler: ErrorHandler, config: Config):\n if error_handler._fallback is not _default:\n if config._FALLBACK_ERROR_FORMAT is _default:\n return error_handler.fallback\n\n error_logger.warning(\n \"Conflicting error fallback values were found in the \"\n \"error handler and in the app.config while handling an \"\n \"exception. Using the value from app.config.\"\n )\n return config.FALLBACK_ERROR_FORMAT\n\n @classmethod\n def finalize(\n cls,\n error_handler: ErrorHandler,\n config: Config,\n fallback: Optional[str] = None,\n ):\n if fallback:\n deprecation(\n \"Setting the ErrorHandler fallback value via finalize() \"\n \"is deprecated and no longer supported. This feature will \"\n \"be removed in v22.6. Instead, use \"\n \"app.config.FALLBACK_ERROR_FORMAT.\",\n 22.6,\n )\n\n if not fallback:\n fallback = config.FALLBACK_ERROR_FORMAT\n\n if fallback != DEFAULT_FORMAT:\n if error_handler._fallback is not _default:\n error_logger.warning(\n f\"Setting the fallback value to {fallback}. 
This changes \"\n \"the current non-default value \"\n f\"'{error_handler._fallback}'.\"\n )\n error_handler._fallback = fallback\n\n if not isinstance(error_handler, cls):\n error_logger.warning(\n f\"Error handler is non-conforming: {type(error_handler)}\"\n )\n\n def _full_lookup(self, exception, route_name: Optional[str] = None):\n return self.lookup(exception, route_name)\n\n def add(self, exception, handler, route_names: Optional[List[str]] = None):\n \"\"\"\n Add a new exception handler to an already existing handler object.\n\n :param exception: Type of exception that need to be handled\n :param handler: Reference to the method that will handle the exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :type handler: ``function``\n\n :return: None\n \"\"\"\n if route_names:\n for route in route_names:\n self.cached_handlers[(exception, route)] = handler\n else:\n self.cached_handlers[(exception, None)] = handler\n\n def lookup(self, exception, route_name: Optional[str] = None):\n \"\"\"\n Lookup the existing instance of :class:`ErrorHandler` and fetch the\n registered handler for a specific type of exception.\n\n This method leverages a dict lookup to speedup the retrieval process.\n\n :param exception: Type of exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Registered function if found ``None`` otherwise\n \"\"\"\n exception_class = type(exception)\n\n for name in (route_name, None):\n exception_key = (exception_class, name)\n handler = self.cached_handlers.get(exception_key)\n if handler:\n return handler\n\n for name in (route_name, None):\n for ancestor in type.mro(exception_class):\n exception_key = (ancestor, name)\n if exception_key in self.cached_handlers:\n handler = self.cached_handlers[exception_key]\n self.cached_handlers[\n (exception_class, route_name)\n ] = handler\n return handler\n\n if ancestor is BaseException:\n break\n self.cached_handlers[(exception_class, route_name)] = None\n handler = None\n return handler\n\n _lookup = _full_lookup\n\n def response(self, request, exception):\n \"\"\"Fetches and executes an exception handler and returns a response\n object\n\n :param request: Instance of :class:`sanic.request.Request`\n :param exception: Exception to handle\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Wrap the return value obtained from :func:`default`\n or registered handler for that type of exception.\n \"\"\"\n route_name = request.name if request else None\n handler = self._lookup(exception, route_name)\n response = None\n try:\n if handler:\n response = handler(request, exception)\n if response is None:\n response = self.default(request, exception)\n except Exception:\n try:\n url = repr(request.url)\n except AttributeError: # no cov\n url = \"unknown\"\n response_message = (\n \"Exception raised in exception handler \" '\"%s\" for uri: %s'\n )\n error_logger.exception(response_message, handler.__name__, url)\n\n if self.debug:\n return text(response_message % (handler.__name__, url), 500)\n else:\n return text(\"An error occurred while handling an error\", 500)\n return response\n\n def default(self, request, exception):\n \"\"\"\n Provide a default behavior for the objects of :class:`ErrorHandler`.\n If a developer chooses to extent the :class:`ErrorHandler` they can\n provide a custom implementation for this method to behave in a way\n they see 
fit.\n\n :param request: Incoming request\n :param exception: Exception object\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :return:\n \"\"\"\n self.log(request, exception)\n fallback = ErrorHandler._get_fallback_value(self, request.app.config)\n return exception_response(\n request,\n exception,\n debug=self.debug,\n base=self.base,\n fallback=fallback,\n )\n\n @staticmethod\n def log(request, exception):\n quiet = getattr(exception, \"quiet\", False)\n noisy = getattr(request.app.config, \"NOISY_EXCEPTIONS\", False)\n if quiet is False or noisy is True:\n try:\n url = repr(request.url)\n except AttributeError: # no cov\n url = \"unknown\"\n\n error_logger.exception(\n \"Exception occurred while handling uri: %s\", url\n )\n\n\nclass ContentRangeHandler:\n \"\"\"\n A mechanism to parse and process the incoming request headers to\n extract the content range information.\n\n :param request: Incoming api request\n :param stats: Stats related to the content\n\n :type request: :class:`sanic.request.Request`\n :type stats: :class:`posix.stat_result`\n\n :ivar start: Content Range start\n :ivar end: Content Range end\n :ivar size: Length of the content\n :ivar total: Total size identified by the :class:`posix.stat_result`\n instance\n :ivar ContentRangeHandler.headers: Content range header ``dict``\n \"\"\"\n\n __slots__ = (\"start\", \"end\", \"size\", \"total\", \"headers\")\n\n def __init__(self, request, stats):\n self.total = stats.st_size\n _range = request.headers.getone(\"range\", None)\n if _range is None:\n raise HeaderNotFound(\"Range Header Not Found\")\n unit, _, value = tuple(map(str.strip, _range.partition(\"=\")))\n if unit != \"bytes\":\n raise InvalidRangeType(\n \"%s is not a valid Range Type\" % (unit,), self\n )\n start_b, _, end_b = tuple(map(str.strip, value.partition(\"-\")))\n try:\n self.start = int(start_b) if start_b else None\n except ValueError:\n raise ContentRangeError(\n \"'%s' is invalid for Content Range\" % (start_b,), self\n )\n try:\n self.end = int(end_b) if end_b else None\n except ValueError:\n raise ContentRangeError(\n \"'%s' is invalid for Content Range\" % (end_b,), self\n )\n if self.end is None:\n if self.start is None:\n raise ContentRangeError(\n \"Invalid for Content Range parameters\", self\n )\n else:\n # this case represents `Content-Range: bytes 5-`\n self.end = self.total - 1\n else:\n if self.start is None:\n # this case represents `Content-Range: bytes -5`\n self.start = self.total - self.end\n self.end = self.total - 1\n if self.start >= self.end:\n raise ContentRangeError(\n \"Invalid for Content Range parameters\", self\n )\n self.size = self.end - self.start + 1\n self.headers = {\n \"Content-Range\": \"bytes %s-%s/%s\"\n % (self.start, self.end, self.total)\n }\n\n def __bool__(self):\n return self.size > 0\n", "path": "sanic/handlers.py" } ]
[ { "content": "from __future__ import annotations\n\nfrom typing import Dict, List, Optional, Tuple, Type, Union\n\nfrom sanic.config import Config\nfrom sanic.errorpages import (\n DEFAULT_FORMAT,\n BaseRenderer,\n TextRenderer,\n exception_response,\n)\nfrom sanic.exceptions import (\n ContentRangeError,\n HeaderNotFound,\n InvalidRangeType,\n SanicException,\n)\nfrom sanic.helpers import Default, _default\nfrom sanic.log import deprecation, error_logger\nfrom sanic.models.handler_types import RouteHandler\nfrom sanic.response import text\n\n\nclass ErrorHandler:\n \"\"\"\n Provide :class:`sanic.app.Sanic` application with a mechanism to handle\n and process any and all uncaught exceptions in a way the application\n developer will set fit.\n\n This error handling framework is built into the core that can be extended\n by the developers to perform a wide range of tasks from recording the error\n stats to reporting them to an external service that can be used for\n realtime alerting system.\n\n \"\"\"\n\n def __init__(\n self,\n fallback: Union[str, Default] = _default,\n base: Type[BaseRenderer] = TextRenderer,\n ):\n self.cached_handlers: Dict[\n Tuple[Type[BaseException], Optional[str]], Optional[RouteHandler]\n ] = {}\n self.debug = False\n self._fallback = fallback\n self.base = base\n\n if fallback is not _default:\n self._warn_fallback_deprecation()\n\n @property\n def fallback(self): # no cov\n # This is for backwards compat and can be removed in v22.6\n if self._fallback is _default:\n return DEFAULT_FORMAT\n return self._fallback\n\n @fallback.setter\n def fallback(self, value: str): # no cov\n self._warn_fallback_deprecation()\n if not isinstance(value, str):\n raise SanicException(\n f\"Cannot set error handler fallback to: value={value}\"\n )\n self._fallback = value\n\n @staticmethod\n def _warn_fallback_deprecation():\n deprecation(\n \"Setting the ErrorHandler fallback value directly is \"\n \"deprecated and no longer supported. This feature will \"\n \"be removed in v22.6. Instead, use \"\n \"app.config.FALLBACK_ERROR_FORMAT.\",\n 22.6,\n )\n\n @classmethod\n def _get_fallback_value(cls, error_handler: ErrorHandler, config: Config):\n if error_handler._fallback is not _default:\n if config._FALLBACK_ERROR_FORMAT == error_handler._fallback:\n return error_handler.fallback\n\n error_logger.warning(\n \"Conflicting error fallback values were found in the \"\n \"error handler and in the app.config while handling an \"\n \"exception. Using the value from app.config.\"\n )\n return config.FALLBACK_ERROR_FORMAT\n\n @classmethod\n def finalize(\n cls,\n error_handler: ErrorHandler,\n config: Config,\n fallback: Optional[str] = None,\n ):\n if fallback:\n deprecation(\n \"Setting the ErrorHandler fallback value via finalize() \"\n \"is deprecated and no longer supported. This feature will \"\n \"be removed in v22.6. Instead, use \"\n \"app.config.FALLBACK_ERROR_FORMAT.\",\n 22.6,\n )\n\n if not fallback:\n fallback = config.FALLBACK_ERROR_FORMAT\n\n if fallback != DEFAULT_FORMAT:\n if error_handler._fallback is not _default:\n error_logger.warning(\n f\"Setting the fallback value to {fallback}. 
This changes \"\n \"the current non-default value \"\n f\"'{error_handler._fallback}'.\"\n )\n error_handler._fallback = fallback\n\n if not isinstance(error_handler, cls):\n error_logger.warning(\n f\"Error handler is non-conforming: {type(error_handler)}\"\n )\n\n def _full_lookup(self, exception, route_name: Optional[str] = None):\n return self.lookup(exception, route_name)\n\n def add(self, exception, handler, route_names: Optional[List[str]] = None):\n \"\"\"\n Add a new exception handler to an already existing handler object.\n\n :param exception: Type of exception that need to be handled\n :param handler: Reference to the method that will handle the exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :type handler: ``function``\n\n :return: None\n \"\"\"\n if route_names:\n for route in route_names:\n self.cached_handlers[(exception, route)] = handler\n else:\n self.cached_handlers[(exception, None)] = handler\n\n def lookup(self, exception, route_name: Optional[str] = None):\n \"\"\"\n Lookup the existing instance of :class:`ErrorHandler` and fetch the\n registered handler for a specific type of exception.\n\n This method leverages a dict lookup to speedup the retrieval process.\n\n :param exception: Type of exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Registered function if found ``None`` otherwise\n \"\"\"\n exception_class = type(exception)\n\n for name in (route_name, None):\n exception_key = (exception_class, name)\n handler = self.cached_handlers.get(exception_key)\n if handler:\n return handler\n\n for name in (route_name, None):\n for ancestor in type.mro(exception_class):\n exception_key = (ancestor, name)\n if exception_key in self.cached_handlers:\n handler = self.cached_handlers[exception_key]\n self.cached_handlers[\n (exception_class, route_name)\n ] = handler\n return handler\n\n if ancestor is BaseException:\n break\n self.cached_handlers[(exception_class, route_name)] = None\n handler = None\n return handler\n\n _lookup = _full_lookup\n\n def response(self, request, exception):\n \"\"\"Fetches and executes an exception handler and returns a response\n object\n\n :param request: Instance of :class:`sanic.request.Request`\n :param exception: Exception to handle\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Wrap the return value obtained from :func:`default`\n or registered handler for that type of exception.\n \"\"\"\n route_name = request.name if request else None\n handler = self._lookup(exception, route_name)\n response = None\n try:\n if handler:\n response = handler(request, exception)\n if response is None:\n response = self.default(request, exception)\n except Exception:\n try:\n url = repr(request.url)\n except AttributeError: # no cov\n url = \"unknown\"\n response_message = (\n \"Exception raised in exception handler \" '\"%s\" for uri: %s'\n )\n error_logger.exception(response_message, handler.__name__, url)\n\n if self.debug:\n return text(response_message % (handler.__name__, url), 500)\n else:\n return text(\"An error occurred while handling an error\", 500)\n return response\n\n def default(self, request, exception):\n \"\"\"\n Provide a default behavior for the objects of :class:`ErrorHandler`.\n If a developer chooses to extent the :class:`ErrorHandler` they can\n provide a custom implementation for this method to behave in a way\n they see 
fit.\n\n :param request: Incoming request\n :param exception: Exception object\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :return:\n \"\"\"\n self.log(request, exception)\n fallback = ErrorHandler._get_fallback_value(self, request.app.config)\n return exception_response(\n request,\n exception,\n debug=self.debug,\n base=self.base,\n fallback=fallback,\n )\n\n @staticmethod\n def log(request, exception):\n quiet = getattr(exception, \"quiet\", False)\n noisy = getattr(request.app.config, \"NOISY_EXCEPTIONS\", False)\n if quiet is False or noisy is True:\n try:\n url = repr(request.url)\n except AttributeError: # no cov\n url = \"unknown\"\n\n error_logger.exception(\n \"Exception occurred while handling uri: %s\", url\n )\n\n\nclass ContentRangeHandler:\n \"\"\"\n A mechanism to parse and process the incoming request headers to\n extract the content range information.\n\n :param request: Incoming api request\n :param stats: Stats related to the content\n\n :type request: :class:`sanic.request.Request`\n :type stats: :class:`posix.stat_result`\n\n :ivar start: Content Range start\n :ivar end: Content Range end\n :ivar size: Length of the content\n :ivar total: Total size identified by the :class:`posix.stat_result`\n instance\n :ivar ContentRangeHandler.headers: Content range header ``dict``\n \"\"\"\n\n __slots__ = (\"start\", \"end\", \"size\", \"total\", \"headers\")\n\n def __init__(self, request, stats):\n self.total = stats.st_size\n _range = request.headers.getone(\"range\", None)\n if _range is None:\n raise HeaderNotFound(\"Range Header Not Found\")\n unit, _, value = tuple(map(str.strip, _range.partition(\"=\")))\n if unit != \"bytes\":\n raise InvalidRangeType(\n \"%s is not a valid Range Type\" % (unit,), self\n )\n start_b, _, end_b = tuple(map(str.strip, value.partition(\"-\")))\n try:\n self.start = int(start_b) if start_b else None\n except ValueError:\n raise ContentRangeError(\n \"'%s' is invalid for Content Range\" % (start_b,), self\n )\n try:\n self.end = int(end_b) if end_b else None\n except ValueError:\n raise ContentRangeError(\n \"'%s' is invalid for Content Range\" % (end_b,), self\n )\n if self.end is None:\n if self.start is None:\n raise ContentRangeError(\n \"Invalid for Content Range parameters\", self\n )\n else:\n # this case represents `Content-Range: bytes 5-`\n self.end = self.total - 1\n else:\n if self.start is None:\n # this case represents `Content-Range: bytes -5`\n self.start = self.total - self.end\n self.end = self.total - 1\n if self.start >= self.end:\n raise ContentRangeError(\n \"Invalid for Content Range parameters\", self\n )\n self.size = self.end - self.start + 1\n self.headers = {\n \"Content-Range\": \"bytes %s-%s/%s\"\n % (self.start, self.end, self.total)\n }\n\n def __bool__(self):\n return self.size > 0\n", "path": "sanic/handlers.py" } ]
diff --git a/sanic/handlers.py b/sanic/handlers.py index b3f01566a5..917edd234a 100644 --- a/sanic/handlers.py +++ b/sanic/handlers.py @@ -78,7 +78,7 @@ def _warn_fallback_deprecation(): @classmethod def _get_fallback_value(cls, error_handler: ErrorHandler, config: Config): if error_handler._fallback is not _default: - if config._FALLBACK_ERROR_FORMAT is _default: + if config._FALLBACK_ERROR_FORMAT == error_handler._fallback: return error_handler.fallback error_logger.warning(
Error Handler mismatch warning

The warning for an error handler mismatch triggers incorrectly on v22.3 when setting `FALLBACK_ERROR_FORMAT`.

```python
app.config.FALLBACK_ERROR_FORMAT = "text"


@app.get("/")
async def handler(request: Request):
    1 / 0
```

This can be resolved as follows:

```python
    @classmethod
    def _get_fallback_value(cls, error_handler: ErrorHandler, config: Config):
        if error_handler._fallback is not _default:
            if config._FALLBACK_ERROR_FORMAT == error_handler._fallback:  # <<<<< this line needs to change
                return error_handler.fallback

            error_logger.warning(
                "Conflicting error fallback values were found in the "
                "error handler and in the app.config while handling an "
                "exception. Using the value from app.config."
            )
        return config.FALLBACK_ERROR_FORMAT
```

https://github.com/sanic-org/sanic/blob/5d683c6ea4b615e80c51d80189436437b824cce6/sanic/handlers.py#L79
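As a gloss on why the original check misfired: per the `finalize()` code in this record, any non-default `FALLBACK_ERROR_FORMAT` is copied from the config onto the handler, so by the time an exception is handled both values are set and equal, and the old sentinel test (`config._FALLBACK_ERROR_FORMAT is _default`) could never succeed. A simplified stand-alone model of the two conditions (the names and structure here are illustrative, not sanic's actual classes):

```python
# Toy model of sanic's fallback resolution; `_default` mirrors the
# sentinel in sanic.helpers, everything else is simplified.
_default = object()


def pick_fallback(handler_fallback, config_fallback, *, fixed):
    if handler_fallback is not _default:
        if fixed:
            agree = config_fallback == handler_fallback   # new check
        else:
            agree = config_fallback is _default           # old check
        if agree:
            return handler_fallback
        print("warning: conflicting fallback values")
    return config_fallback


# After `app.config.FALLBACK_ERROR_FORMAT = "text"` and finalize(),
# both sides hold "text":
pick_fallback("text", "text", fixed=False)  # warns spuriously
pick_fallback("text", "text", fixed=True)   # no warning
```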
microsoft__knossos-ksc-1027
[ { "content": "import torch\nimport ksc.torch_frontend as knossos\n\n# run-bench: Knossos source, and \"nice\" PyTorch implementation\n# BEGINDOC\[email protected]\ndef sqrl(x: torch.Tensor):\n \"\"\"\n sqrl: Squared Leaky Relu\n Like a capsule from /Stuck in a Rut/\n Typically x is a 4x4 tensor, possibly\n packed in a 4n x 4m array\n \"\"\"\n y = torch.sum(x)\n if y < 0.0:\n t = -0.125 * x\n else:\n t = 1 / 2 * x ** 2\n return torch.mean(torch.sin(t) * t)\n\n\n# ENDDOC\n\n# run-bench: PyTorch \"fast\" implementation\ndef sqrl_pytorch(x: torch.Tensor):\n return sqrl(x)\n\n\n# run-bench: PyTorch \"nice\" implementation\ndef sqrl_pytorch_nice(x: torch.Tensor):\n return sqrl(x)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef sqrl_bench_configs():\n yield torch.randn((4, 4))\n yield torch.randn((16, 16))\n\n\n#################################\n#\n# vsqrl - vectorized sqrl\n#\n\nvsqrl = knossos.vmap(sqrl)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef vsqrl_bench_configs():\n yield torch.randn((10, 4, 4))\n yield torch.randn((1000, 4, 4))\n yield torch.randn((1000, 16, 16))\n", "path": "examples/dl-capsule/sqrl.py" } ]
[ { "content": "import torch\nimport ksc.torch_frontend as knossos\n\n# run-bench: Knossos source, and \"nice\" PyTorch implementation\n# BEGINDOC\[email protected]\ndef sqrl(x: torch.Tensor):\n \"\"\"\n sqrl: Squared Leaky Relu\n Like a capsule from /Stuck in a Rut/\n Typically x is a 4x4 tensor, possibly\n packed in a 4n x 4m array\n \"\"\"\n y = torch.sum(x)\n if y < 0.0:\n t = -0.125 * x\n else:\n t = 1 / 2 * x ** 2\n return torch.mean(torch.sin(t) * t)\n\n\n# ENDDOC\n\n# run-bench: PyTorch \"fast\" implementation\ndef sqrl_pytorch(x: torch.Tensor):\n return sqrl.raw_f(x)\n\n\n# run-bench: PyTorch \"nice\" implementation\ndef sqrl_pytorch_nice(x: torch.Tensor):\n return sqrl.raw_f(x)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef sqrl_bench_configs():\n yield torch.randn((4, 4))\n yield torch.randn((16, 16))\n\n\n#################################\n#\n# vsqrl - vectorized sqrl\n#\n\nvsqrl = knossos.vmap(sqrl)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef vsqrl_bench_configs():\n yield torch.randn((10, 4, 4))\n yield torch.randn((1000, 4, 4))\n yield torch.randn((1000, 16, 16))\n", "path": "examples/dl-capsule/sqrl.py" } ]
diff --git a/examples/dl-capsule/sqrl.py b/examples/dl-capsule/sqrl.py index f4f94791e..2f022b265 100644 --- a/examples/dl-capsule/sqrl.py +++ b/examples/dl-capsule/sqrl.py @@ -23,12 +23,12 @@ def sqrl(x: torch.Tensor): # run-bench: PyTorch "fast" implementation def sqrl_pytorch(x: torch.Tensor): - return sqrl(x) + return sqrl.raw_f(x) # run-bench: PyTorch "nice" implementation def sqrl_pytorch_nice(x: torch.Tensor): - return sqrl(x) + return sqrl.raw_f(x) # run-bench: Define a range of values at which to call the methods
Bug: Segmentation fault in sqrl_pytorch-PyTorch CUDA

Just saw this while working on something else. I haven't done much to debug it, but note that the crash is in copydown, on a fairly innocuous operation (`aten::sum(Tensor 2) -> Float`), so it might be something to do with `KS_ALLOCATOR` not being defined? Or it could just be an out-of-memory condition that isn't caught?

![image](https://user-images.githubusercontent.com/128119/130364073-023976aa-9222-4d55-ad43-95a7ac530d76.png)
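The fix in the diff sidesteps the crashing path rather than patching copydown itself: the benchmark's "fast" and "nice" PyTorch baselines now call `sqrl.raw_f(x)`, the undecorated Python function, instead of `sqrl(x)`, which after `@knossos.register` routes through the Knossos-compiled entry point. Here is a sketch of that decorator shape, assuming (from the diff alone) that `knossos.register` keeps the original callable on a `raw_f` attribute:

```python
# Illustrative decorator shape inferred from the diff; this is NOT
# knossos's real implementation, only the contract the fix relies on.
def register(f):
    class Registered:
        raw_f = staticmethod(f)          # the plain Python/PyTorch function

        def __call__(self, *args):
            # stand-in for the Knossos-compiled execution path
            return f(*args)

    return Registered()


@register
def square(x):
    return x * x


print(square(3))         # would go through the compiled path
print(square.raw_f(3))   # bypasses it, as the fixed benchmarks do
```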
learningequality__kolibri-7238
[ { "content": "import fnmatch\nimport logging\nimport os\n\nfrom django.core.cache import cache\nfrom sqlalchemy.exc import DatabaseError\n\nfrom .paths import get_content_database_dir_path\nfrom .sqlalchemybridge import Bridge\nfrom kolibri.core.discovery.utils.filesystem import enumerate_mounted_disk_partitions\nfrom kolibri.utils.uuids import is_valid_uuid\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_channel_ids_for_content_dirs(content_dirs):\n database_dir_paths = [\n get_content_database_dir_path(contentfolder=path) for path in content_dirs\n ]\n channel_ids = set()\n for path in database_dir_paths:\n channel_ids.update(get_channel_ids_for_content_database_dir(path))\n return list(channel_ids)\n\n\ndef get_channel_ids_for_content_database_dir(content_database_dir):\n \"\"\"\n Returns a list of channel IDs for the channel databases that exist in a content database directory.\n \"\"\"\n\n # immediately return an empty list if the content database directory doesn't exist\n if not os.path.isdir(content_database_dir):\n return []\n\n # get a list of all the database files in the directory, and extract IDs\n db_list = fnmatch.filter(os.listdir(content_database_dir), \"*.sqlite3\")\n db_names = [db.split(\".sqlite3\", 1)[0] for db in db_list]\n\n # determine which database names are valid, and only use those ones\n valid_db_names = [name for name in db_names if is_valid_uuid(name)]\n invalid_db_names = set(db_names) - set(valid_db_names)\n if invalid_db_names:\n logger.warning(\n \"Ignoring databases in content database directory '{directory}' with invalid names: {names}\".format(\n directory=content_database_dir, names=invalid_db_names\n )\n )\n\n # nonexistent database files are created if we delete the files that have broken symbolic links;\n # empty database files are created if we delete a database file while the server is running and connected to it;\n # here, we delete and exclude such databases to avoid errors when we try to connect to them\n db_files_to_remove = set({})\n for db_name in valid_db_names:\n filename = os.path.join(content_database_dir, \"{}.sqlite3\".format(db_name))\n if not os.path.exists(filename) or os.path.getsize(filename) == 0:\n db_files_to_remove.add(db_name)\n os.remove(filename)\n\n if db_files_to_remove:\n err_msg = (\n \"Removing nonexistent or empty databases in content database directory \"\n \"'{directory}' with IDs: {names}.\\nPlease import the channels again.\"\n )\n logger.warning(\n err_msg.format(directory=content_database_dir, names=db_files_to_remove)\n )\n valid_dbs = list(set(valid_db_names) - set(db_files_to_remove))\n\n return valid_dbs\n\n\ndef enumerate_content_database_file_paths(content_database_dir):\n full_dir_template = os.path.join(content_database_dir, \"{}.sqlite3\")\n channel_ids = get_channel_ids_for_content_database_dir(content_database_dir)\n return [full_dir_template.format(f) for f in channel_ids]\n\n\ndef read_channel_metadata_from_db_file(channeldbpath):\n # import here to avoid circular imports whenever kolibri.core.content.models imports utils too\n from kolibri.core.content.models import ChannelMetadata\n\n source = Bridge(sqlite_file_path=channeldbpath)\n\n ChannelMetadataClass = source.get_class(ChannelMetadata)\n\n source_channel_metadata = source.session.query(ChannelMetadataClass).all()[0]\n\n # Use the inferred version from the SQLAlchemy Bridge object, and set it as additional\n # metadata on the channel data\n\n source_channel_metadata.inferred_schema_version = source.schema_version\n\n source.end()\n\n 
# Adds an attribute `root_id` when `root_id` does not exist to match with\n # the latest schema.\n if not hasattr(source_channel_metadata, \"root_id\"):\n setattr(\n source_channel_metadata,\n \"root_id\",\n getattr(source_channel_metadata, \"root_pk\"),\n )\n\n return source_channel_metadata\n\n\ndef get_channels_for_data_folder(datafolder):\n channels = []\n for path in enumerate_content_database_file_paths(\n get_content_database_dir_path(datafolder)\n ):\n try:\n channel = read_channel_metadata_from_db_file(path)\n except DatabaseError:\n logger.warning(\n \"Tried to import channel from database file {}, but the file was corrupted.\".format(\n path\n )\n )\n continue\n channel_data = {\n \"path\": path,\n \"id\": channel.id,\n \"name\": channel.name,\n \"description\": channel.description,\n \"tagline\": channel.tagline,\n \"thumbnail\": channel.thumbnail,\n \"version\": channel.version,\n \"root\": channel.root_id,\n \"author\": channel.author,\n \"last_updated\": getattr(channel, \"last_updated\", None),\n \"lang_code\": getattr(channel, \"lang_code\", None),\n \"lang_name\": getattr(channel, \"lang_name\", None),\n }\n channels.append(channel_data)\n return channels\n\n\n# Use this to cache mounted drive information when\n# it has already been fetched for querying by drive id\nMOUNTED_DRIVES_CACHE_KEY = \"mounted_drives_cache_key\"\n\n\ndef get_mounted_drives_with_channel_info():\n drives = enumerate_mounted_disk_partitions()\n for drive in drives.values():\n drive.metadata[\"channels\"] = (\n get_channels_for_data_folder(drive.datafolder) if drive.datafolder else []\n )\n cache.set(MOUNTED_DRIVES_CACHE_KEY, drives, 3600)\n return drives\n\n\ndef get_mounted_drive_by_id(drive_id):\n drives = cache.get(MOUNTED_DRIVES_CACHE_KEY)\n if drives is None or drives.get(drive_id, None) is None:\n drives = get_mounted_drives_with_channel_info()\n return drives[drive_id]\n", "path": "kolibri/core/content/utils/channels.py" } ]
[ { "content": "import fnmatch\nimport logging\nimport os\n\nfrom django.core.cache import cache\nfrom sqlalchemy.exc import DatabaseError\n\nfrom .paths import get_content_database_dir_path\nfrom .sqlalchemybridge import Bridge\nfrom kolibri.core.discovery.utils.filesystem import enumerate_mounted_disk_partitions\nfrom kolibri.utils.uuids import is_valid_uuid\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_channel_ids_for_content_dirs(content_dirs):\n database_dir_paths = [\n get_content_database_dir_path(contentfolder=path) for path in content_dirs\n ]\n channel_ids = set()\n for path in database_dir_paths:\n channel_ids.update(get_channel_ids_for_content_database_dir(path))\n return list(channel_ids)\n\n\ndef get_channel_ids_for_content_database_dir(content_database_dir):\n \"\"\"\n Returns a list of channel IDs for the channel databases that exist in a content database directory.\n \"\"\"\n\n # immediately return an empty list if the content database directory doesn't exist\n if not os.path.isdir(content_database_dir):\n return []\n\n # get a list of all the database files in the directory, and extract IDs\n db_list = fnmatch.filter(os.listdir(content_database_dir), \"*.sqlite3\")\n db_names = [db.split(\".sqlite3\", 1)[0] for db in db_list]\n\n # determine which database names are valid, and only use those ones\n valid_db_names = [name for name in db_names if is_valid_uuid(name)]\n invalid_db_names = set(db_names) - set(valid_db_names)\n if invalid_db_names:\n logger.warning(\n \"Ignoring databases in content database directory '{directory}' with invalid names: {names}\".format(\n directory=content_database_dir, names=invalid_db_names\n )\n )\n\n # nonexistent database files are created if we delete the files that have broken symbolic links;\n # empty database files are created if we delete a database file while the server is running and connected to it;\n # here, we delete and exclude such databases to avoid errors when we try to connect to them\n db_files_to_remove = set({})\n for db_name in valid_db_names:\n filename = os.path.join(content_database_dir, \"{}.sqlite3\".format(db_name))\n if not os.path.exists(filename) or os.path.getsize(filename) == 0:\n db_files_to_remove.add(db_name)\n os.remove(filename)\n\n if db_files_to_remove:\n err_msg = (\n \"Removing nonexistent or empty databases in content database directory \"\n \"'{directory}' with IDs: {names}.\\nPlease import the channels again.\"\n )\n logger.warning(\n err_msg.format(directory=content_database_dir, names=db_files_to_remove)\n )\n valid_dbs = list(set(valid_db_names) - set(db_files_to_remove))\n\n return valid_dbs\n\n\ndef enumerate_content_database_file_paths(content_database_dir):\n full_dir_template = os.path.join(content_database_dir, \"{}.sqlite3\")\n channel_ids = get_channel_ids_for_content_database_dir(content_database_dir)\n return [full_dir_template.format(f) for f in channel_ids]\n\n\ndef read_channel_metadata_from_db_file(channeldbpath):\n # import here to avoid circular imports whenever kolibri.core.content.models imports utils too\n from kolibri.core.content.models import ChannelMetadata\n\n source = Bridge(sqlite_file_path=channeldbpath)\n\n ChannelMetadataClass = source.get_class(ChannelMetadata)\n\n source_channel_metadata = source.session.query(ChannelMetadataClass).all()[0]\n\n # Use the inferred version from the SQLAlchemy Bridge object, and set it as additional\n # metadata on the channel data\n\n source_channel_metadata.inferred_schema_version = source.schema_version\n\n source.end()\n\n 
# Adds an attribute `root_id` when `root_id` does not exist to match with\n # the latest schema.\n if not hasattr(source_channel_metadata, \"root_id\"):\n setattr(\n source_channel_metadata,\n \"root_id\",\n getattr(source_channel_metadata, \"root_pk\"),\n )\n\n return source_channel_metadata\n\n\ndef get_channels_for_data_folder(datafolder):\n channels = []\n for path in enumerate_content_database_file_paths(\n get_content_database_dir_path(datafolder)\n ):\n try:\n channel = read_channel_metadata_from_db_file(path)\n except DatabaseError:\n logger.warning(\n \"Tried to import channel from database file {}, but the file was corrupted.\".format(\n path\n )\n )\n continue\n channel_data = {\n \"path\": path,\n \"id\": channel.id,\n \"name\": channel.name,\n \"description\": channel.description,\n \"tagline\": getattr(channel, \"tagline\", \"\"),\n \"thumbnail\": channel.thumbnail,\n \"version\": channel.version,\n \"root\": channel.root_id,\n \"author\": channel.author,\n \"last_updated\": getattr(channel, \"last_updated\", None),\n \"lang_code\": getattr(channel, \"lang_code\", None),\n \"lang_name\": getattr(channel, \"lang_name\", None),\n }\n channels.append(channel_data)\n return channels\n\n\n# Use this to cache mounted drive information when\n# it has already been fetched for querying by drive id\nMOUNTED_DRIVES_CACHE_KEY = \"mounted_drives_cache_key\"\n\n\ndef get_mounted_drives_with_channel_info():\n drives = enumerate_mounted_disk_partitions()\n for drive in drives.values():\n drive.metadata[\"channels\"] = (\n get_channels_for_data_folder(drive.datafolder) if drive.datafolder else []\n )\n cache.set(MOUNTED_DRIVES_CACHE_KEY, drives, 3600)\n return drives\n\n\ndef get_mounted_drive_by_id(drive_id):\n drives = cache.get(MOUNTED_DRIVES_CACHE_KEY)\n if drives is None or drives.get(drive_id, None) is None:\n drives = get_mounted_drives_with_channel_info()\n return drives[drive_id]\n", "path": "kolibri/core/content/utils/channels.py" } ]
diff --git a/kolibri/core/assets/src/views/BottomAppBar.vue b/kolibri/core/assets/src/views/BottomAppBar.vue index 7c063606fbc..e285bcdd983 100644 --- a/kolibri/core/assets/src/views/BottomAppBar.vue +++ b/kolibri/core/assets/src/views/BottomAppBar.vue @@ -65,6 +65,8 @@ } .inner-bottom { + height: 100%; + padding: 10px 0; margin: auto; } diff --git a/kolibri/core/content/utils/channels.py b/kolibri/core/content/utils/channels.py index 1e7437c6081..88c22c266fc 100644 --- a/kolibri/core/content/utils/channels.py +++ b/kolibri/core/content/utils/channels.py @@ -123,7 +123,7 @@ def get_channels_for_data_folder(datafolder): "id": channel.id, "name": channel.name, "description": channel.description, - "tagline": channel.tagline, + "tagline": getattr(channel, "tagline", ""), "thumbnail": channel.thumbnail, "version": channel.version, "root": channel.root_id, diff --git a/kolibri/plugins/coach/assets/src/views/plan/CreateExamPage/CreateExamPreview.vue b/kolibri/plugins/coach/assets/src/views/plan/CreateExamPage/CreateExamPreview.vue index ed3ee525ff4..fbd5a342529 100644 --- a/kolibri/plugins/coach/assets/src/views/plan/CreateExamPage/CreateExamPreview.vue +++ b/kolibri/plugins/coach/assets/src/views/plan/CreateExamPage/CreateExamPreview.vue @@ -100,7 +100,7 @@ /> <BottomAppBar style="z-index: 1062;"> - <KButtonGroup style="margin-top: 8px;"> + <KButtonGroup> <KRouterLink appearance="flat-button" :text="coreString('goBackAction')" diff --git a/kolibri/plugins/coach/assets/src/views/plan/CreateExamPage/index.vue b/kolibri/plugins/coach/assets/src/views/plan/CreateExamPage/index.vue index 762abae0713..c8c99acec1c 100644 --- a/kolibri/plugins/coach/assets/src/views/plan/CreateExamPage/index.vue +++ b/kolibri/plugins/coach/assets/src/views/plan/CreateExamPage/index.vue @@ -128,7 +128,7 @@ /> </BottomAppBar> <BottomAppBar v-else> - <KButtonGroup style="margin-top: 8px;"> + <KButtonGroup> <KRouterLink appearance="flat-button" :text="coreString('goBackAction')" diff --git a/kolibri/plugins/coach/assets/src/views/plan/assignments/AssignmentDetailsModal.vue b/kolibri/plugins/coach/assets/src/views/plan/assignments/AssignmentDetailsModal.vue index f8edbd2ac69..ffb1e9b3b99 100644 --- a/kolibri/plugins/coach/assets/src/views/plan/assignments/AssignmentDetailsModal.vue +++ b/kolibri/plugins/coach/assets/src/views/plan/assignments/AssignmentDetailsModal.vue @@ -53,7 +53,7 @@ </form> <BottomAppBar> - <KButtonGroup style="margin-top: 8px;"> + <KButtonGroup> <KButton :text="coreString('cancelAction')" appearance="flat-button" diff --git a/kolibri/plugins/device/assets/src/app.js b/kolibri/plugins/device/assets/src/app.js index 2a2e4214a6b..7a6ab75cc3b 100644 --- a/kolibri/plugins/device/assets/src/app.js +++ b/kolibri/plugins/device/assets/src/app.js @@ -17,7 +17,7 @@ class DeviceManagementModule extends KolibriApp { ready() { // reset module states after leaving their respective page router.beforeEach((to, from, next) => { - if (this.store.getters.isSuperuser && this.store.state.core.facilities.length === 0) { + if (this.store.state.core.facilities.length === 0) { this.store.dispatch('getFacilities').then(next, next); } next(); diff --git a/kolibri/plugins/device/assets/src/views/ManageContentPage/SelectionBottomBar.vue b/kolibri/plugins/device/assets/src/views/ManageContentPage/SelectionBottomBar.vue index d14c5111d73..85f395a01e2 100644 --- a/kolibri/plugins/device/assets/src/views/ManageContentPage/SelectionBottomBar.vue +++ b/kolibri/plugins/device/assets/src/views/ManageContentPage/SelectionBottomBar.vue @@ 
-1,9 +1,9 @@ <template> <BottomAppBar> - <span class="message">{{ selectedMessage }}</span> <template v-if="actionType === 'manage'"> - <KButtonGroup style="margin-top: 8px;"> + <KButtonGroup> + <span class="message">{{ selectedMessage }}</span> <KButton :disabled="$attrs.disabled || buttonsDisabled" :text="coreString('deleteAction')" @@ -19,13 +19,15 @@ </KButtonGroup> </template> - <KButton - v-else - :disabled="$attrs.disabled || buttonsDisabled" - :text="confirmButtonLabel" - :primary="true" - @click="$emit('clickconfirm')" - /> + <template v-else> + <span class="message">{{ selectedMessage }}</span> + <KButton + :disabled="$attrs.disabled || buttonsDisabled" + :text="confirmButtonLabel" + :primary="true" + @click="$emit('clickconfirm')" + /> + </template> </BottomAppBar> </template> diff --git a/kolibri/plugins/device/assets/src/views/PostSetupModalGroup.vue b/kolibri/plugins/device/assets/src/views/PostSetupModalGroup.vue index 82226a81bf6..e476e8c431a 100644 --- a/kolibri/plugins/device/assets/src/views/PostSetupModalGroup.vue +++ b/kolibri/plugins/device/assets/src/views/PostSetupModalGroup.vue @@ -68,7 +68,7 @@ // This used to determine Select Source workflow to enter into importedFacility() { const [facility] = this.$store.state.core.facilities; - if (facility.last_synced !== null) { + if (facility && facility.last_synced !== null) { return facility; } return null; diff --git a/kolibri/plugins/facility/assets/src/views/DataPage/SyncInterface/index.vue b/kolibri/plugins/facility/assets/src/views/DataPage/SyncInterface/index.vue index 392a2745ed7..b52f0010a4b 100644 --- a/kolibri/plugins/facility/assets/src/views/DataPage/SyncInterface/index.vue +++ b/kolibri/plugins/facility/assets/src/views/DataPage/SyncInterface/index.vue @@ -3,7 +3,6 @@ <KPageContainer> <h1>{{ $tr('syncData') }}</h1> - <p>{{ $tr('access') }}</p> <p> <KButton appearance="basic-link" @@ -131,8 +130,6 @@ }, $trs: { syncData: 'Sync facility data', - access: - 'This is an experimental feature. You can use it if you have access to the Kolibri Data Portal.', learnMore: 'Usage and privacy', facility: 'Facility', register: 'Register', diff --git a/kolibri/plugins/facility/assets/src/views/UserCreatePage.vue b/kolibri/plugins/facility/assets/src/views/UserCreatePage.vue index 85d9c052585..8d8b497e8d2 100644 --- a/kolibri/plugins/facility/assets/src/views/UserCreatePage.vue +++ b/kolibri/plugins/facility/assets/src/views/UserCreatePage.vue @@ -215,11 +215,14 @@ submitForm() { this.formSubmitted = true; let password = this.password; - if (!this.formIsValid) { - return this.focusOnInvalidField(); - } + if (!this.showPasswordInput) { password = 'NOT_SPECIFIED'; + this.passwordValid = true; + } + + if (!this.formIsValid) { + return this.focusOnInvalidField(); } this.busy = true; this.$store diff --git a/kolibri/plugins/learn/assets/src/views/AssessmentWrapper/index.vue b/kolibri/plugins/learn/assets/src/views/AssessmentWrapper/index.vue index a04166272d4..832531b9dc0 100644 --- a/kolibri/plugins/learn/assets/src/views/AssessmentWrapper/index.vue +++ b/kolibri/plugins/learn/assets/src/views/AssessmentWrapper/index.vue @@ -50,7 +50,6 @@ oriented data synchronization. <KIcon icon="mastered" :color="success ? 
$themeTokens.mastered : $themePalette.grey.v_200" - style="margin-bottom: -6px;" /> <div class="overall-status-text"> <span v-if="success" class="completed" :style="{ color: $themeTokens.annotation }"> diff --git a/kolibri/plugins/setup_wizard/assets/src/modules/pluginModule.js b/kolibri/plugins/setup_wizard/assets/src/modules/pluginModule.js index 48956b20e7e..4dc03f08746 100644 --- a/kolibri/plugins/setup_wizard/assets/src/modules/pluginModule.js +++ b/kolibri/plugins/setup_wizard/assets/src/modules/pluginModule.js @@ -12,7 +12,7 @@ const SetupStrings = createTranslator('SetupStrings', { }, }); -const { NOT_SPECIFIED } = DemographicConstants; +const { DEFERRED } = DemographicConstants; export default { state: { @@ -44,8 +44,10 @@ export default { full_name: '', username: '', password: '', - gender: NOT_SPECIFIED, - birth_year: NOT_SPECIFIED, + // Superusers are not required to provide this info and are not + // nudged to update the profile when on Learn page + gender: DEFERRED, + birth_year: DEFERRED, }, }, loading: false, diff --git a/kolibri/plugins/setup_wizard/assets/test/views/SuperuserCredentialsForm.spec.js b/kolibri/plugins/setup_wizard/assets/test/views/SuperuserCredentialsForm.spec.js index f5e19b62578..de3555375b7 100644 --- a/kolibri/plugins/setup_wizard/assets/test/views/SuperuserCredentialsForm.spec.js +++ b/kolibri/plugins/setup_wizard/assets/test/views/SuperuserCredentialsForm.spec.js @@ -33,8 +33,8 @@ describe('SuperuserCredentialsForm', () => { full_name: 'Schoolhouse Rock', username: 'schoolhouse_rock', password: 'password', - birth_year: 'NOT_SPECIFIED', - gender: 'NOT_SPECIFIED', + birth_year: 'DEFERRED', + gender: 'DEFERRED', }); expect(wrapper.vm.$emit).toHaveBeenCalledWith('click_next', { full_name: 'Schoolhouse Rock', diff --git a/kolibri/plugins/user/assets/src/views/SignInPage.vue b/kolibri/plugins/user/assets/src/views/SignInPage.vue index 1e59e11e92e..0c69e6f113d 100644 --- a/kolibri/plugins/user/assets/src/views/SignInPage.vue +++ b/kolibri/plugins/user/assets/src/views/SignInPage.vue @@ -179,7 +179,7 @@ ref="username" v-model="username" autocomplete="username" - :autofocus="!hasMultipleFacilities" + :autofocus="true" :label="coreString('usernameLabel')" :invalid="usernameIsInvalid" :invalidText="usernameIsInvalidText"
Import footer styling regression

### Observed behavior

![image](https://user-images.githubusercontent.com/2367265/85073073-fef1cd80-b16e-11ea-98c5-8e2342430929.png)

### Expected behavior

The button and text should be vertically centered, or the footer should be shorter.

### Steps to reproduce

Run a content import.

### Context

0.14 beta 3
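The diff bundles several fixes alongside the footer CSS; the `channels.py` change is worth calling out, since it guards against channel databases exported before the `tagline` field existed. A minimal sketch of that defensive read, in isolation:

```python
# Minimal sketch of the defensive read added in channels.py: a row from
# an old channel database may simply lack a `tagline` attribute, so
# fetch it with a default instead of letting AttributeError escape.
class LegacyChannelRow:           # stand-in for a pre-`tagline` schema row
    name = "Old channel"


channel = LegacyChannelRow()
tagline = getattr(channel, "tagline", "")   # "" rather than AttributeError
print(repr(tagline))                        # ''
```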
cocotb__cocotb-2079
[ { "content": "#!/usr/bin/env python\n\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# -*- coding: utf-8 -*-\n\nimport ctypes\nimport warnings\n\nimport cocotb\nfrom cocotb import simulator\nfrom cocotb.binary import BinaryValue\nfrom cocotb.log import SimLog\nfrom cocotb.result import TestError\n\n# Only issue a warning for each deprecated attribute access\n_deprecation_warned = set()\n\n\nclass SimHandleBase:\n \"\"\"Base class for all simulation objects.\n\n We maintain a handle which we can use for GPI calls.\n \"\"\"\n\n # For backwards compatibility we support a mapping of old member names\n # which may alias with the simulator hierarchy. In these cases the\n # simulator result takes priority, only falling back to the python member\n # if there is no colliding object in the elaborated design.\n _compat_mapping = {\n \"log\" : \"_log\",\n \"fullname\" : \"_fullname\",\n \"name\" : \"_name\",\n }\n\n def __init__(self, handle, path):\n \"\"\"\n .. Constructor. This RST comment works around sphinx-doc/sphinx#6885\n\n Args:\n handle (int): The GPI handle to the simulator object.\n path (str): Path to this handle, ``None`` if root.\n \"\"\"\n self._handle = handle\n self._len = None # type: int\n \"\"\"The \"length\" (the number of elements) of the underlying object. 
For vectors this is the number of bits.\"\"\"\n self._sub_handles = {} # type: dict\n \"\"\"Dictionary of this handle's children.\"\"\"\n self._invalid_sub_handles = set() # type: set\n \"\"\"Python :class:`set` of invalid queries, for caching purposes.\"\"\"\n self._name = self._handle.get_name_string() # type: str\n \"\"\"The name of an object.\n\n :meta public:\n \"\"\"\n self._type = self._handle.get_type_string() # type: str\n \"\"\"The type of an object as a string.\n\n :meta public:\n \"\"\"\n self._fullname = self._name + \"(%s)\" % self._type # type: str\n \"\"\"The name of an object with its type appended in parentheses.\"\"\"\n self._path = self._name if path is None else path # type: str\n \"\"\"The path to this handle, or its name if this is the root handle.\n\n :meta public:\n \"\"\"\n self._log = SimLog(\"cocotb.%s\" % self._name)\n \"\"\"The logging object.\"\"\"\n self._log.debug(\"Created\")\n self._def_name = self._handle.get_definition_name() # type: str\n \"\"\"The name of a GPI object's definition.\n\n This is the value of ``vpiDefName`` for VPI, ``vhpiNameP`` for VHPI,\n and ``mti_GetPrimaryName`` for FLI.\n Support for this depends on the specific object type and simulator used.\n\n :meta public:\n \"\"\"\n self._def_file = self._handle.get_definition_file() # type: str\n \"\"\"The name of the file that sources the object's definition.\n\n This is the value of ``vpiDefFile`` for VPI, ``vhpiFileNameP`` for VHPI,\n and ``mti_GetRegionSourceName`` for FLI.\n Support for this depends on the specific object type and simulator used.\n\n :meta public:\n \"\"\"\n\n def get_definition_name(self):\n return self._def_name\n\n def get_definition_file(self):\n return self._def_file\n\n def __hash__(self):\n return hash(self._handle)\n\n def __len__(self):\n \"\"\"Return the \"length\" (the number of elements) of the underlying object.\n\n For vectors this is the number of bits.\n \"\"\"\n if self._len is None:\n self._len = self._handle.get_num_elems()\n return self._len\n\n def __eq__(self, other):\n \"\"\"Equality comparator for handles\n\n Example usage::\n\n if clk == dut.clk:\n do_something()\n \"\"\"\n if not isinstance(other, SimHandleBase):\n return NotImplemented\n return self._handle == other._handle\n\n def __ne__(self, other):\n if not isinstance(other, SimHandleBase):\n return NotImplemented\n return self._handle != other._handle\n\n def __repr__(self):\n desc = self._path\n defname = self._def_name\n if defname:\n desc += \" with definition \"+defname\n deffile = self._def_file\n if deffile:\n desc += \" (at \"+deffile+\")\"\n return type(self).__qualname__ + \"(\" + desc + \")\"\n\n def __str__(self):\n return self._path\n\n def __setattr__(self, name, value):\n if name in self._compat_mapping:\n if name not in _deprecation_warned:\n warnings.warn(\"Use of attribute %r is deprecated, use %r instead\" % (name, self._compat_mapping[name]))\n _deprecation_warned.add(name)\n return setattr(self, self._compat_mapping[name], value)\n else:\n return object.__setattr__(self, name, value)\n\n def __getattr__(self, name):\n if name in self._compat_mapping:\n if name not in _deprecation_warned:\n warnings.warn(\"Use of attribute %r is deprecated, use %r instead\" % (name, self._compat_mapping[name]))\n _deprecation_warned.add(name)\n return getattr(self, self._compat_mapping[name])\n else:\n return object.__getattribute__(self, name)\n\n\nclass RegionObject(SimHandleBase):\n \"\"\"A region object, such as a scope or namespace.\n\n Region objects don't have values, they are 
effectively scopes or namespaces.\n \"\"\"\n\n def __init__(self, handle, path):\n SimHandleBase.__init__(self, handle, path)\n self._discovered = False # True if this object has already been discovered\n\n def __iter__(self):\n \"\"\"Iterate over all known objects in this layer of hierarchy.\"\"\"\n if not self._discovered:\n self._discover_all()\n\n for name, handle in self._sub_handles.items():\n if isinstance(handle, list):\n self._log.debug(\"Found index list length %d\", len(handle))\n for subindex, subhdl in enumerate(handle):\n if subhdl is None:\n self._log.warning(\"Index %d doesn't exist in %s.%s\", subindex, self._name, name)\n continue\n self._log.debug(\"Yielding index %d from %s (%s)\", subindex, name, type(subhdl))\n yield subhdl\n else:\n self._log.debug(\"Yielding %s (%s)\", name, handle)\n yield handle\n\n def _discover_all(self):\n \"\"\"When iterating or performing IPython tab completion, we run through ahead of\n time and discover all possible children, populating the :any:`_sub_handles`\n mapping. Hierarchy can't change after elaboration so we only have to\n do this once.\n \"\"\"\n if self._discovered:\n return\n self._log.debug(\"Discovering all on %s\", self._name)\n for thing in self._handle.iterate(simulator.OBJECTS):\n name = thing.get_name_string()\n try:\n hdl = SimHandle(thing, self._child_path(name))\n except TestError as e:\n self._log.debug(\"%s\", e)\n continue\n\n try:\n key = self._sub_handle_key(name)\n except ValueError:\n self._log.debug(\"Unable to translate handle >%s< to a valid _sub_handle key\", hdl._name)\n continue\n\n self._sub_handles[key] = hdl\n\n self._discovered = True\n\n def _child_path(self, name) -> str:\n \"\"\"Return a string of the path of the child :any:`SimHandle` for a given *name*.\"\"\"\n return self._path + \".\" + name\n\n def _sub_handle_key(self, name):\n \"\"\"Translate the handle name to a key to use in :any:`_sub_handles` dictionary.\"\"\"\n return name.split(\".\")[-1]\n\n def __dir__(self):\n \"\"\"Permits IPython tab completion to work.\"\"\"\n self._discover_all()\n return super(RegionObject, self).__dir__() + [str(k) for k in self._sub_handles]\n\n\nclass HierarchyObject(RegionObject):\n \"\"\"Hierarchy objects are namespace/scope objects.\"\"\"\n\n def __get_sub_handle_by_name(self, name):\n try:\n return self._sub_handles[name]\n except KeyError:\n pass\n\n # Cache to avoid a call to the simulator if we already know the name is\n # invalid. 
Unclear if we care, but we had this before.\n if name in self._invalid_sub_handles:\n return None\n\n new_handle = self._handle.get_handle_by_name(name)\n\n if not new_handle:\n self._invalid_sub_handles.add(name)\n return None\n\n sub_handle = SimHandle(new_handle, self._child_path(name))\n self._sub_handles[name] = sub_handle\n return sub_handle\n\n def __setattr__(self, name, value):\n \"\"\"Provide transparent access to signals via the hierarchy.\n\n Slightly hacky version of operator overloading in Python.\n\n Raise an :exc:`AttributeError` if users attempt to create new members which\n don't exist in the design.\n \"\"\"\n\n # private attributes pass through directly\n if name.startswith(\"_\"):\n return SimHandleBase.__setattr__(self, name, value)\n\n # then try handles\n sub = self.__get_sub_handle_by_name(name)\n if sub is not None:\n sub.value = value\n return\n\n # compat behavior\n if name in self._compat_mapping:\n return SimHandleBase.__setattr__(self, name, value)\n\n raise AttributeError(\"%s contains no object named %s\" % (self._name, name))\n\n def __getattr__(self, name):\n \"\"\"Query the simulator for an object with the specified name\n and cache the result to build a tree of objects.\n \"\"\"\n if name.startswith(\"_\"):\n return SimHandleBase.__getattr__(self, name)\n\n handle = self.__get_sub_handle_by_name(name)\n if handle is not None:\n return handle\n\n if name in self._compat_mapping:\n return SimHandleBase.__getattr__(self, name)\n\n raise AttributeError(\"%s contains no object named %s\" % (self._name, name))\n\n def _id(self, name, extended: bool = True):\n \"\"\"Query the simulator for an object with the specified *name*,\n and cache the result to build a tree of objects.\n\n If *extended* is ``True``, run the query only for VHDL extended identifiers.\n For Verilog, only ``extended=False`` is supported.\n\n :meta public:\n \"\"\"\n if extended:\n name = \"\\\\\"+name+\"\\\\\"\n\n handle = self.__get_sub_handle_by_name(name)\n if handle is not None:\n return handle\n\n raise AttributeError(\"%s contains no object named %s\" % (self._name, name))\n\n\nclass HierarchyArrayObject(RegionObject):\n \"\"\"Hierarchy Arrays are containers of Hierarchy Objects.\"\"\"\n\n def _sub_handle_key(self, name):\n \"\"\"Translate the handle name to a key to use in :any:`_sub_handles` dictionary.\"\"\"\n # This is slightly hacky, but we need to extract the index from the name\n #\n # FLI and VHPI(IUS): _name(X) where X is the index\n # VHPI(ALDEC): _name__X where X is the index\n # VPI: _name[X] where X is the index\n import re\n result = re.match(r\"{0}__(?P<index>\\d+)$\".format(self._name), name)\n if not result:\n result = re.match(r\"{0}\\((?P<index>\\d+)\\)$\".format(self._name), name)\n if not result:\n result = re.match(r\"{0}\\[(?P<index>\\d+)\\]$\".format(self._name), name)\n\n if result:\n return int(result.group(\"index\"))\n else:\n raise ValueError(\"Unable to match an index pattern: {}\".format(name))\n\n def __len__(self):\n \"\"\"Return the \"length\" of the generate block.\"\"\"\n if self._len is None:\n if not self._discovered:\n self._discover_all()\n\n self._len = len(self._sub_handles)\n return self._len\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n raise IndexError(\"Slice indexing is not supported\")\n if index in self._sub_handles:\n return self._sub_handles[index]\n new_handle = self._handle.get_handle_by_index(index)\n if not new_handle:\n raise IndexError(\"%s contains no object at index %d\" % (self._name, index))\n path = 
self._path + \"[\" + str(index) + \"]\"\n self._sub_handles[index] = SimHandle(new_handle, path)\n return self._sub_handles[index]\n\n def _child_path(self, name):\n \"\"\"Return a string of the path of the child :any:`SimHandle` for a given name.\"\"\"\n index = self._sub_handle_key(name)\n return self._path + \"[\" + str(index) + \"]\"\n\n def __setitem__(self, index, value):\n raise TypeError(\"Not permissible to set %s at index %d\" % (self._name, index))\n\n\nclass _AssignmentResult:\n \"\"\"\n An object that exists solely to provide an error message if the caller\n is not aware of cocotb's meaning of ``<=``.\n \"\"\"\n\n def __init__(self, signal, value):\n self._signal = signal\n self._value = value\n\n def __bool__(self):\n raise TypeError(\n \"Attempted to use `{0._signal!r} <= {0._value!r}` (a cocotb \"\n \"delayed write) as if it were a numeric comparison. To perform \"\n \"comparison, use `{0._signal!r}.value <= {0._value!r}` instead.\"\n .format(self)\n )\n\n\nclass NonHierarchyObject(SimHandleBase):\n \"\"\"Common base class for all non-hierarchy objects.\"\"\"\n\n def __iter__(self):\n return iter(())\n\n @property\n def value(self):\n \"\"\"The value of this simulation object.\n\n .. note::\n When setting this property, the value is stored by the :class:`~cocotb.scheduler.Scheduler`\n and all stored values are written at the same time at the end of the current simulator time step.\n\n Use :meth:`setimmediatevalue` to set the value immediately.\n \"\"\"\n raise TypeError(\"Not permissible to get values of object %s of type %s\" % (self._name, type(self)))\n\n @value.setter\n def value(self, value):\n self._set_value(value, cocotb.scheduler._schedule_write)\n\n def setimmediatevalue(self, value):\n \"\"\" Assign a value to this simulation object immediately. 
\"\"\"\n def _call_now(handle, f, *args):\n f(*args)\n self._set_value(value, _call_now)\n\n def _set_value(self, value, call_sim):\n \"\"\" This should be overriden in subclasses.\n\n This is used to implement both the setter for :attr:`value`, and the\n :meth:`setimmediatevalue` method.\n\n ``call_sim(handle, f, *args)`` should be used to schedule simulator writes,\n rather than performing them directly as ``f(*args)``.\n \"\"\"\n raise TypeError(\"Not permissible to set values on object %s of type %s\" % (self._name, type(self)))\n\n def __le__(self, value):\n \"\"\"Overload less-than-or-equal-to operator to provide an HDL-like shortcut.\n\n Example:\n >>> module.signal <= 2\n \"\"\"\n self.value = value\n return _AssignmentResult(self, value)\n\n def __eq__(self, other):\n \"\"\"Equality comparator for non-hierarchy objects\n\n If ``other`` is not a :class:`SimHandleBase` instance the comparision\n uses the comparison method of the ``other`` object against our\n ``.value``.\n \"\"\"\n if isinstance(other, SimHandleBase):\n return SimHandleBase.__eq__(self, other)\n return self.value == other\n\n def __ne__(self, other):\n if isinstance(other, SimHandleBase):\n return SimHandleBase.__ne__(self, other)\n return self.value != other\n\n # Re-define hash because we defined __eq__\n def __hash__(self):\n return SimHandleBase.__hash__(self)\n\n\nclass ConstantObject(NonHierarchyObject):\n \"\"\"An object which has a value that can be read, but not set.\n\n The value is cached in the class since it is fixed at elaboration\n time and won't change within a simulation.\n \"\"\"\n\n def __init__(self, handle, path, handle_type):\n \"\"\"\n Args:\n handle (int): The GPI handle to the simulator object.\n path (str): Path to this handle, ``None`` if root.\n handle_type: The type of the handle\n (``simulator.INTEGER``, ``simulator.ENUM``,\n ``simulator.REAL``, ``simulator.STRING``).\n \"\"\"\n NonHierarchyObject.__init__(self, handle, path)\n if handle_type in [simulator.INTEGER, simulator.ENUM]:\n self._value = self._handle.get_signal_val_long()\n elif handle_type == simulator.REAL:\n self._value = self._handle.get_signal_val_real()\n elif handle_type == simulator.STRING:\n self._value = self._handle.get_signal_val_str()\n else:\n val = self._handle.get_signal_val_binstr()\n self._value = BinaryValue(n_bits=len(val))\n try:\n self._value.binstr = val\n except Exception:\n self._value = val\n\n def __int__(self):\n return int(self.value)\n\n def __float__(self):\n return float(self.value)\n\n @NonHierarchyObject.value.getter\n def value(self):\n \"\"\"The value of this simulation object.\"\"\"\n return self._value\n\n def __str__(self):\n if isinstance(self.value, bytes):\n StringObject._emit_str_warning(self)\n return self.value.decode('ascii')\n else:\n ModifiableObject._emit_str_warning(self)\n return str(self.value)\n\n\nclass NonHierarchyIndexableObject(NonHierarchyObject):\n \"\"\" A non-hierarchy indexable object.\n\n Getting and setting the current value of an array is done\n by iterating through sub-handles in left-to-right order.\n\n Given an HDL array ``arr``:\n\n +--------------+---------------------+--------------------------------------------------------------+\n | Verilog | VHDL | ``arr.value`` is equivalent to |\n +==============+=====================+==============================================================+\n | ``arr[4:7]`` | ``arr(4 to 7)`` | ``[arr[4].value, arr[5].value, arr[6].value, arr[7].value]`` |\n 
+--------------+---------------------+--------------------------------------------------------------+\n | ``arr[7:4]`` | ``arr(7 downto 4)`` | ``[arr[7].value, arr[6].value, arr[5].value, arr[4].value]`` |\n +--------------+---------------------+--------------------------------------------------------------+\n\n When setting the signal as in ``arr.value = ...``, the same index equivalence as noted in the table holds.\n\n .. warning::\n Assigning a value to a sub-handle:\n\n - **Wrong**: ``dut.some_array.value[0] = 1`` (gets value as a list then updates index 0)\n - **Correct**: ``dut.some_array[0].value = 1``\n \"\"\"\n\n def __init__(self, handle, path):\n NonHierarchyObject.__init__(self, handle, path)\n self._range = self._handle.get_range()\n\n def __setitem__(self, index, value):\n \"\"\"Provide transparent assignment to indexed array handles.\"\"\"\n self[index].value = value\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n raise IndexError(\"Slice indexing is not supported\")\n if self._range is None:\n raise IndexError(\"%s is not indexable. Unable to get object at index %d\" % (self._fullname, index))\n if index in self._sub_handles:\n return self._sub_handles[index]\n new_handle = self._handle.get_handle_by_index(index)\n if not new_handle:\n raise IndexError(\"%s contains no object at index %d\" % (self._fullname, index))\n path = self._path + \"[\" + str(index) + \"]\"\n self._sub_handles[index] = SimHandle(new_handle, path)\n return self._sub_handles[index]\n\n def __iter__(self):\n if self._range is None:\n return\n\n self._log.debug(\"Iterating with range [%d:%d]\", self._range[0], self._range[1])\n for i in self._range_iter(self._range[0], self._range[1]):\n try:\n result = self[i]\n yield result\n except IndexError:\n continue\n\n def _range_iter(self, left, right):\n if left > right:\n while left >= right:\n yield left\n left = left - 1\n else:\n while left <= right:\n yield left\n left = left + 1\n\n @NonHierarchyObject.value.getter\n def value(self) -> list:\n # Don't use self.__iter__, because it has an unwanted `except IndexError`\n return [\n self[i].value\n for i in self._range_iter(self._range[0], self._range[1])\n ]\n\n def _set_value(self, value, call_sim):\n \"\"\"Assign value from a list of same length to an array in left-to-right order.\n Index 0 of the list maps to the left-most index in the array.\n\n See the docstring for this class.\n \"\"\"\n if type(value) is not list:\n raise TypeError(\"Assigning non-list value to object %s of type %s\" % (self._name, type(self)))\n if len(value) != len(self):\n raise ValueError(\"Assigning list of length %d to object %s of length %d\" % (\n len(value), self._name, len(self)))\n for val_idx, self_idx in enumerate(self._range_iter(self._range[0], self._range[1])):\n self[self_idx]._set_value(value[val_idx], call_sim)\n\n\nclass NonConstantObject(NonHierarchyIndexableObject):\n \"\"\" A non-constant object\"\"\"\n # FIXME: what is the difference to ModifiableObject? 
Explain in docstring.\n\n def drivers(self):\n \"\"\"An iterator for gathering all drivers for a signal.\"\"\"\n return self._handle.iterate(simulator.DRIVERS)\n\n def loads(self):\n \"\"\"An iterator for gathering all loads on a signal.\"\"\"\n return self._handle.iterate(simulator.LOADS)\n\n\nclass _SetAction:\n \"\"\"Base class representing the type of action used while write-accessing a handle.\"\"\"\n pass\n\n\nclass _SetValueAction(_SetAction):\n __slots__ = (\"value\",)\n \"\"\"Base class representing the type of action used while write-accessing a handle with a value.\"\"\"\n\n def __init__(self, value):\n self.value = value\n\n\nclass Deposit(_SetValueAction):\n \"\"\"Action used for placing a value into a given handle.\"\"\"\n\n def _as_gpi_args_for(self, hdl):\n return self.value, 0 # GPI_DEPOSIT\n\n\nclass Force(_SetValueAction):\n \"\"\"Action used to force a handle to a given value until a release is applied.\"\"\"\n\n def _as_gpi_args_for(self, hdl):\n return self.value, 1 # GPI_FORCE\n\n\nclass Freeze(_SetAction):\n \"\"\"Action used to make a handle keep its current value until a release is used.\"\"\"\n\n def _as_gpi_args_for(self, hdl):\n return hdl.value, 1 # GPI_FORCE\n\n\nclass Release(_SetAction):\n \"\"\"Action used to stop the effects of a previously applied force/freeze action.\"\"\"\n\n def _as_gpi_args_for(self, hdl):\n return 0, 2 # GPI_RELEASE\n\n\nclass ModifiableObject(NonConstantObject):\n \"\"\"Base class for simulator objects whose values can be modified.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to *value*.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. net, signal or variable.\n\n We determine the library call to make based on the type of the value\n because assigning integers less than 32 bits is faster.\n\n Args:\n value (ctypes.Structure, cocotb.binary.BinaryValue, int, double):\n The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target is not wide enough or has an unsupported type\n for value assignment.\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n if isinstance(value, int) and value < 0x7fffffff and len(self) <= 32:\n call_sim(self, self._handle.set_signal_val_long, set_action, value)\n return\n if isinstance(value, ctypes.Structure):\n value = BinaryValue(value=cocotb.utils.pack(value), n_bits=len(self))\n elif isinstance(value, int):\n value = BinaryValue(value=value, n_bits=len(self), bigEndian=False)\n elif isinstance(value, dict):\n # We're given a dictionary with a list of values and a bit size...\n num = 0\n vallist = list(value[\"values\"])\n vallist.reverse()\n if len(vallist) * value[\"bits\"] != len(self):\n raise TypeError(\"Unable to set with array length %d of %d bit entries = %d total, target is only %d bits long\" %\n (len(value[\"values\"]), value[\"bits\"], len(value[\"values\"]) * value[\"bits\"], len(self)))\n\n for val in vallist:\n num = (num << value[\"bits\"]) + val\n value = BinaryValue(value=num, n_bits=len(self), bigEndian=False)\n\n elif not isinstance(value, BinaryValue):\n raise TypeError(\n \"Unsupported type for value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_binstr, set_action, value.binstr)\n\n def _check_for_set_action(self, value):\n if not isinstance(value, _SetAction):\n return value, 0 # GPI_DEPOSIT\n return value._as_gpi_args_for(self)\n\n @NonConstantObject.value.getter\n def value(self) -> 
BinaryValue:\n binstr = self._handle.get_signal_val_binstr()\n result = BinaryValue(binstr, len(binstr))\n return result\n\n def __int__(self):\n return int(self.value)\n\n def _emit_str_warning(self):\n warnings.warn(\n \"`str({t})` is deprecated, and in future will return `{t}._path`. \"\n \"To get a string representation of the value, use `str({t}.value)`.\"\n .format(t=type(self).__qualname__),\n FutureWarning, stacklevel=3)\n\n def __str__(self):\n self._emit_str_warning()\n return str(self.value)\n\n\nclass RealObject(ModifiableObject):\n \"\"\"Specific object handle for Real signals and variables.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to value.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. net, signal or variable.\n\n Args:\n value (float): The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target has an unsupported type for\n real value assignment.\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n try:\n value = float(value)\n except ValueError:\n raise TypeError(\n \"Unsupported type for real value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_real, set_action, value)\n\n @ModifiableObject.value.getter\n def value(self) -> float:\n return self._handle.get_signal_val_real()\n\n def __float__(self):\n return float(self.value)\n\n\nclass EnumObject(ModifiableObject):\n \"\"\"Specific object handle for enumeration signals and variables.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to *value*.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. net, signal or variable.\n\n Args:\n value (int): The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target has an unsupported type for\n integer value assignment.\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n if isinstance(value, BinaryValue):\n value = int(value)\n elif not isinstance(value, int):\n raise TypeError(\n \"Unsupported type for enum value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_long, set_action, value)\n\n @ModifiableObject.value.getter\n def value(self) -> int:\n return self._handle.get_signal_val_long()\n\n\nclass IntegerObject(ModifiableObject):\n \"\"\"Specific object handle for Integer and Enum signals and variables.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to *value*.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. 
net, signal or variable.\n\n Args:\n value (int): The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target has an unsupported type for\n integer value assignment.\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n if isinstance(value, BinaryValue):\n value = int(value)\n elif not isinstance(value, int):\n raise TypeError(\n \"Unsupported type for integer value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_long, set_action, value)\n\n @ModifiableObject.value.getter\n def value(self) -> int:\n return self._handle.get_signal_val_long()\n\n\nclass StringObject(ModifiableObject):\n \"\"\"Specific object handle for String variables.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to *value*.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. net, signal or variable.\n\n Args:\n value (bytes): The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target has an unsupported type for\n string value assignment.\n\n .. versionchanged:: 1.4\n Takes :class:`bytes` instead of :class:`str`.\n Users are now expected to choose an encoding when using these objects.\n As a convenience, when assigning :class:`str` values, ASCII encoding will be used as a safe default.\n\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n if isinstance(value, str):\n warnings.warn(\n \"Handles on string objects will soon not accept `str` objects. \"\n \"Please use a bytes object by encoding the string as you see fit. \"\n \"`str.encode('ascii')` is typically sufficient.\", DeprecationWarning, stacklevel=2)\n value = value.encode('ascii') # may throw UnicodeEncodeError\n\n if not isinstance(value, bytes):\n raise TypeError(\n \"Unsupported type for string value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_str, set_action, value)\n\n @ModifiableObject.value.getter\n def value(self) -> bytes:\n return self._handle.get_signal_val_str()\n\n def _emit_str_warning(self):\n warnings.warn(\n \"`str({t})` is deprecated, and in future will return `{t}._path`. 
\"\n \"To access the `bytes` value of this handle, use `{t}.value`.\"\n .format(t=type(self).__qualname__),\n FutureWarning, stacklevel=3)\n\n def __str__(self):\n self._emit_str_warning()\n return self.value.decode('ascii')\n\n\n_handle2obj = {}\n\n\ndef SimHandle(handle, path=None):\n \"\"\"Factory function to create the correct type of `SimHandle` object.\n\n Args:\n handle (int): The GPI handle to the simulator object.\n path (str): Path to this handle, ``None`` if root.\n\n Returns:\n The `SimHandle` object.\n\n Raises:\n TestError: If no matching object for GPI type could be found.\n \"\"\"\n _type2cls = {\n simulator.MODULE: HierarchyObject,\n simulator.STRUCTURE: HierarchyObject,\n simulator.REG: ModifiableObject,\n simulator.NET: ModifiableObject,\n simulator.NETARRAY: NonHierarchyIndexableObject,\n simulator.REAL: RealObject,\n simulator.INTEGER: IntegerObject,\n simulator.ENUM: EnumObject,\n simulator.STRING: StringObject,\n simulator.GENARRAY: HierarchyArrayObject,\n }\n\n # Enforce singletons since it's possible to retrieve handles avoiding\n # the hierarchy by getting driver/load information\n global _handle2obj\n try:\n return _handle2obj[handle]\n except KeyError:\n pass\n\n t = handle.get_type()\n\n # Special case for constants\n if handle.get_const() and t not in [\n simulator.MODULE,\n simulator.STRUCTURE,\n simulator.NETARRAY,\n simulator.GENARRAY,\n ]:\n obj = ConstantObject(handle, path, t)\n _handle2obj[handle] = obj\n return obj\n\n if t not in _type2cls:\n raise TestError(\"Couldn't find a matching object for GPI type %d (path=%s)\" % (t, path))\n obj = _type2cls[t](handle, path)\n _handle2obj[handle] = obj\n return obj\n", "path": "cocotb/handle.py" } ]
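Before the `after_files` version that follows, reaching a generate block by attribute lookup could fail: `HierarchyObject.__get_sub_handle_by_name` in the file above goes straight to `get_handle_by_name`, and some simulators cannot resolve generate pseudo-regions by name. A hedged sketch of the old workaround (the test body is illustrative; `genblk1` is the block name used by this PR's sample modules):

.. code-block:: python3

    import cocotb


    @cocotb.test()
    async def access_generate_block_pre_patch(dut):
        # With the pre-patch handle.py, direct lookup may raise because
        # __get_sub_handle_by_name() only tries get_handle_by_name().
        try:
            blk = dut.genblk1
        except AttributeError:
            # Workaround: iterating the parent runs _discover_all(),
            # which populates _sub_handles with every child,
            # generate pseudo-regions included.
            for _ in dut:
                pass
            blk = dut.genblk1
        dut._log.info("found %r", blk)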
[ { "content": "#!/usr/bin/env python\n\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# -*- coding: utf-8 -*-\n\nimport ctypes\nimport warnings\n\nimport cocotb\nfrom cocotb import simulator\nfrom cocotb.binary import BinaryValue\nfrom cocotb.log import SimLog\nfrom cocotb.result import TestError\n\n# Only issue a warning for each deprecated attribute access\n_deprecation_warned = set()\n\n\nclass SimHandleBase:\n \"\"\"Base class for all simulation objects.\n\n We maintain a handle which we can use for GPI calls.\n \"\"\"\n\n # For backwards compatibility we support a mapping of old member names\n # which may alias with the simulator hierarchy. In these cases the\n # simulator result takes priority, only falling back to the python member\n # if there is no colliding object in the elaborated design.\n _compat_mapping = {\n \"log\" : \"_log\",\n \"fullname\" : \"_fullname\",\n \"name\" : \"_name\",\n }\n\n def __init__(self, handle, path):\n \"\"\"\n .. Constructor. This RST comment works around sphinx-doc/sphinx#6885\n\n Args:\n handle (int): The GPI handle to the simulator object.\n path (str): Path to this handle, ``None`` if root.\n \"\"\"\n self._handle = handle\n self._len = None # type: int\n \"\"\"The \"length\" (the number of elements) of the underlying object. 
For vectors this is the number of bits.\"\"\"\n self._sub_handles = {} # type: dict\n \"\"\"Dictionary of this handle's children.\"\"\"\n self._invalid_sub_handles = set() # type: set\n \"\"\"Python :class:`set` of invalid queries, for caching purposes.\"\"\"\n self._name = self._handle.get_name_string() # type: str\n \"\"\"The name of an object.\n\n :meta public:\n \"\"\"\n self._type = self._handle.get_type_string() # type: str\n \"\"\"The type of an object as a string.\n\n :meta public:\n \"\"\"\n self._fullname = self._name + \"(%s)\" % self._type # type: str\n \"\"\"The name of an object with its type appended in parentheses.\"\"\"\n self._path = self._name if path is None else path # type: str\n \"\"\"The path to this handle, or its name if this is the root handle.\n\n :meta public:\n \"\"\"\n self._log = SimLog(\"cocotb.%s\" % self._name)\n \"\"\"The logging object.\"\"\"\n self._log.debug(\"Created\")\n self._def_name = self._handle.get_definition_name() # type: str\n \"\"\"The name of a GPI object's definition.\n\n :meta public:\n \"\"\"\n self._def_file = self._handle.get_definition_file() # type: str\n \"\"\"The file that sources the object's definition.\n\n :meta public:\n \"\"\"\n\n def get_definition_name(self):\n return self._def_name\n\n def get_definition_file(self):\n return self._def_file\n\n def __hash__(self):\n return hash(self._handle)\n\n def __len__(self):\n \"\"\"Return the \"length\" (the number of elements) of the underlying object.\n\n For vectors this is the number of bits.\n \"\"\"\n if self._len is None:\n self._len = self._handle.get_num_elems()\n return self._len\n\n def __eq__(self, other):\n \"\"\"Equality comparator for handles\n\n Example usage::\n\n if clk == dut.clk:\n do_something()\n \"\"\"\n if not isinstance(other, SimHandleBase):\n return NotImplemented\n return self._handle == other._handle\n\n def __ne__(self, other):\n if not isinstance(other, SimHandleBase):\n return NotImplemented\n return self._handle != other._handle\n\n def __repr__(self):\n desc = self._path\n defname = self._def_name\n if defname:\n desc += \" with definition \"+defname\n deffile = self._def_file\n if deffile:\n desc += \" (at \"+deffile+\")\"\n return type(self).__qualname__ + \"(\" + desc + \")\"\n\n def __str__(self):\n return self._path\n\n def __setattr__(self, name, value):\n if name in self._compat_mapping:\n if name not in _deprecation_warned:\n warnings.warn(\"Use of attribute %r is deprecated, use %r instead\" % (name, self._compat_mapping[name]))\n _deprecation_warned.add(name)\n return setattr(self, self._compat_mapping[name], value)\n else:\n return object.__setattr__(self, name, value)\n\n def __getattr__(self, name):\n if name in self._compat_mapping:\n if name not in _deprecation_warned:\n warnings.warn(\"Use of attribute %r is deprecated, use %r instead\" % (name, self._compat_mapping[name]))\n _deprecation_warned.add(name)\n return getattr(self, self._compat_mapping[name])\n else:\n return object.__getattribute__(self, name)\n\n\nclass RegionObject(SimHandleBase):\n \"\"\"A region object, such as a scope or namespace.\n\n Region objects don't have values, they are effectively scopes or namespaces.\n \"\"\"\n\n def __init__(self, handle, path):\n SimHandleBase.__init__(self, handle, path)\n self._discovered = False # True if this object has already been discovered\n\n def __iter__(self):\n \"\"\"Iterate over all known objects in this layer of hierarchy.\"\"\"\n if not self._discovered:\n self._discover_all()\n\n for name, handle in 
self._sub_handles.items():\n if isinstance(handle, list):\n self._log.debug(\"Found index list length %d\", len(handle))\n for subindex, subhdl in enumerate(handle):\n if subhdl is None:\n self._log.warning(\"Index %d doesn't exist in %s.%s\", subindex, self._name, name)\n continue\n self._log.debug(\"Yielding index %d from %s (%s)\", subindex, name, type(subhdl))\n yield subhdl\n else:\n self._log.debug(\"Yielding %s (%s)\", name, handle)\n yield handle\n\n def _discover_all(self):\n \"\"\"When iterating or performing IPython tab completion, we run through ahead of\n time and discover all possible children, populating the :any:`_sub_handles`\n mapping. Hierarchy can't change after elaboration so we only have to\n do this once.\n \"\"\"\n if self._discovered:\n return\n self._log.debug(\"Discovering all on %s\", self._name)\n for thing in self._handle.iterate(simulator.OBJECTS):\n name = thing.get_name_string()\n try:\n hdl = SimHandle(thing, self._child_path(name))\n except TestError as e:\n self._log.debug(\"%s\", e)\n continue\n\n try:\n key = self._sub_handle_key(name)\n except ValueError:\n self._log.debug(\"Unable to translate handle >%s< to a valid _sub_handle key\", hdl._name)\n continue\n\n self._sub_handles[key] = hdl\n\n self._discovered = True\n\n def _child_path(self, name) -> str:\n \"\"\"Return a string of the path of the child :any:`SimHandle` for a given *name*.\"\"\"\n return self._path + \".\" + name\n\n def _sub_handle_key(self, name):\n \"\"\"Translate the handle name to a key to use in :any:`_sub_handles` dictionary.\"\"\"\n return name.split(\".\")[-1]\n\n def __dir__(self):\n \"\"\"Permits IPython tab completion to work.\"\"\"\n self._discover_all()\n return super(RegionObject, self).__dir__() + [str(k) for k in self._sub_handles]\n\n\nclass HierarchyObject(RegionObject):\n \"\"\"Hierarchy objects are namespace/scope objects.\"\"\"\n\n def __get_sub_handle_by_name(self, name):\n try:\n return self._sub_handles[name]\n except KeyError:\n pass\n\n if not self._discovered:\n self._discover_all()\n return self.__get_sub_handle_by_name(name)\n\n # Cache to avoid a call to the simulator if we already know the name is\n # invalid. 
Unclear if we care, but we had this before.\n if name in self._invalid_sub_handles:\n return None\n\n new_handle = self._handle.get_handle_by_name(name)\n\n if not new_handle:\n self._invalid_sub_handles.add(name)\n return None\n\n sub_handle = SimHandle(new_handle, self._child_path(name))\n self._sub_handles[name] = sub_handle\n return sub_handle\n\n def __setattr__(self, name, value):\n \"\"\"Provide transparent access to signals via the hierarchy.\n\n Slightly hacky version of operator overloading in Python.\n\n Raise an :exc:`AttributeError` if users attempt to create new members which\n don't exist in the design.\n \"\"\"\n\n # private attributes pass through directly\n if name.startswith(\"_\"):\n return SimHandleBase.__setattr__(self, name, value)\n\n # then try handles\n sub = self.__get_sub_handle_by_name(name)\n if sub is not None:\n sub.value = value\n return\n\n # compat behavior\n if name in self._compat_mapping:\n return SimHandleBase.__setattr__(self, name, value)\n\n raise AttributeError(\"%s contains no object named %s\" % (self._name, name))\n\n def __getattr__(self, name):\n \"\"\"Query the simulator for an object with the specified name\n and cache the result to build a tree of objects.\n \"\"\"\n if name.startswith(\"_\"):\n return SimHandleBase.__getattr__(self, name)\n\n handle = self.__get_sub_handle_by_name(name)\n if handle is not None:\n return handle\n\n if name in self._compat_mapping:\n return SimHandleBase.__getattr__(self, name)\n\n raise AttributeError(\"%s contains no object named %s\" % (self._name, name))\n\n def _id(self, name, extended: bool = True):\n \"\"\"Query the simulator for an object with the specified *name*,\n and cache the result to build a tree of objects.\n\n If *extended* is ``True``, run the query only for VHDL extended identifiers.\n For Verilog, only ``extended=False`` is supported.\n\n :meta public:\n \"\"\"\n if extended:\n name = \"\\\\\"+name+\"\\\\\"\n\n handle = self.__get_sub_handle_by_name(name)\n if handle is not None:\n return handle\n\n raise AttributeError(\"%s contains no object named %s\" % (self._name, name))\n\n\nclass HierarchyArrayObject(RegionObject):\n \"\"\"Hierarchy Arrays are containers of Hierarchy Objects.\"\"\"\n\n def _sub_handle_key(self, name):\n \"\"\"Translate the handle name to a key to use in :any:`_sub_handles` dictionary.\"\"\"\n # This is slightly hacky, but we need to extract the index from the name\n #\n # FLI and VHPI(IUS): _name(X) where X is the index\n # VHPI(ALDEC): _name__X where X is the index\n # VPI: _name[X] where X is the index\n import re\n result = re.match(r\"{0}__(?P<index>\\d+)$\".format(self._name), name)\n if not result:\n result = re.match(r\"{0}\\((?P<index>\\d+)\\)$\".format(self._name), name)\n if not result:\n result = re.match(r\"{0}\\[(?P<index>\\d+)\\]$\".format(self._name), name)\n\n if result:\n return int(result.group(\"index\"))\n else:\n raise ValueError(\"Unable to match an index pattern: {}\".format(name))\n\n def __len__(self):\n \"\"\"Return the \"length\" of the generate block.\"\"\"\n if self._len is None:\n if not self._discovered:\n self._discover_all()\n\n self._len = len(self._sub_handles)\n return self._len\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n raise IndexError(\"Slice indexing is not supported\")\n if index in self._sub_handles:\n return self._sub_handles[index]\n new_handle = self._handle.get_handle_by_index(index)\n if not new_handle:\n raise IndexError(\"%s contains no object at index %d\" % (self._name, index))\n path = 
self._path + \"[\" + str(index) + \"]\"\n self._sub_handles[index] = SimHandle(new_handle, path)\n return self._sub_handles[index]\n\n def _child_path(self, name):\n \"\"\"Return a string of the path of the child :any:`SimHandle` for a given name.\"\"\"\n index = self._sub_handle_key(name)\n return self._path + \"[\" + str(index) + \"]\"\n\n def __setitem__(self, index, value):\n raise TypeError(\"Not permissible to set %s at index %d\" % (self._name, index))\n\n\nclass _AssignmentResult:\n \"\"\"\n An object that exists solely to provide an error message if the caller\n is not aware of cocotb's meaning of ``<=``.\n \"\"\"\n\n def __init__(self, signal, value):\n self._signal = signal\n self._value = value\n\n def __bool__(self):\n raise TypeError(\n \"Attempted to use `{0._signal!r} <= {0._value!r}` (a cocotb \"\n \"delayed write) as if it were a numeric comparison. To perform \"\n \"comparison, use `{0._signal!r}.value <= {0._value!r}` instead.\"\n .format(self)\n )\n\n\nclass NonHierarchyObject(SimHandleBase):\n \"\"\"Common base class for all non-hierarchy objects.\"\"\"\n\n def __iter__(self):\n return iter(())\n\n @property\n def value(self):\n \"\"\"The value of this simulation object.\n\n .. note::\n When setting this property, the value is stored by the :class:`~cocotb.scheduler.Scheduler`\n and all stored values are written at the same time at the end of the current simulator time step.\n\n Use :meth:`setimmediatevalue` to set the value immediately.\n \"\"\"\n raise TypeError(\"Not permissible to get values of object %s of type %s\" % (self._name, type(self)))\n\n @value.setter\n def value(self, value):\n self._set_value(value, cocotb.scheduler._schedule_write)\n\n def setimmediatevalue(self, value):\n \"\"\" Assign a value to this simulation object immediately. 
\"\"\"\n def _call_now(handle, f, *args):\n f(*args)\n self._set_value(value, _call_now)\n\n def _set_value(self, value, call_sim):\n \"\"\" This should be overriden in subclasses.\n\n This is used to implement both the setter for :attr:`value`, and the\n :meth:`setimmediatevalue` method.\n\n ``call_sim(handle, f, *args)`` should be used to schedule simulator writes,\n rather than performing them directly as ``f(*args)``.\n \"\"\"\n raise TypeError(\"Not permissible to set values on object %s of type %s\" % (self._name, type(self)))\n\n def __le__(self, value):\n \"\"\"Overload less-than-or-equal-to operator to provide an HDL-like shortcut.\n\n Example:\n >>> module.signal <= 2\n \"\"\"\n self.value = value\n return _AssignmentResult(self, value)\n\n def __eq__(self, other):\n \"\"\"Equality comparator for non-hierarchy objects\n\n If ``other`` is not a :class:`SimHandleBase` instance the comparision\n uses the comparison method of the ``other`` object against our\n ``.value``.\n \"\"\"\n if isinstance(other, SimHandleBase):\n return SimHandleBase.__eq__(self, other)\n return self.value == other\n\n def __ne__(self, other):\n if isinstance(other, SimHandleBase):\n return SimHandleBase.__ne__(self, other)\n return self.value != other\n\n # Re-define hash because we defined __eq__\n def __hash__(self):\n return SimHandleBase.__hash__(self)\n\n\nclass ConstantObject(NonHierarchyObject):\n \"\"\"An object which has a value that can be read, but not set.\n\n The value is cached in the class since it is fixed at elaboration\n time and won't change within a simulation.\n \"\"\"\n\n def __init__(self, handle, path, handle_type):\n \"\"\"\n Args:\n handle (int): The GPI handle to the simulator object.\n path (str): Path to this handle, ``None`` if root.\n handle_type: The type of the handle\n (``simulator.INTEGER``, ``simulator.ENUM``,\n ``simulator.REAL``, ``simulator.STRING``).\n \"\"\"\n NonHierarchyObject.__init__(self, handle, path)\n if handle_type in [simulator.INTEGER, simulator.ENUM]:\n self._value = self._handle.get_signal_val_long()\n elif handle_type == simulator.REAL:\n self._value = self._handle.get_signal_val_real()\n elif handle_type == simulator.STRING:\n self._value = self._handle.get_signal_val_str()\n else:\n val = self._handle.get_signal_val_binstr()\n self._value = BinaryValue(n_bits=len(val))\n try:\n self._value.binstr = val\n except Exception:\n self._value = val\n\n def __int__(self):\n return int(self.value)\n\n def __float__(self):\n return float(self.value)\n\n @NonHierarchyObject.value.getter\n def value(self):\n \"\"\"The value of this simulation object.\"\"\"\n return self._value\n\n def __str__(self):\n if isinstance(self.value, bytes):\n StringObject._emit_str_warning(self)\n return self.value.decode('ascii')\n else:\n ModifiableObject._emit_str_warning(self)\n return str(self.value)\n\n\nclass NonHierarchyIndexableObject(NonHierarchyObject):\n \"\"\" A non-hierarchy indexable object.\n\n Getting and setting the current value of an array is done\n by iterating through sub-handles in left-to-right order.\n\n Given an HDL array ``arr``:\n\n +--------------+---------------------+--------------------------------------------------------------+\n | Verilog | VHDL | ``arr.value`` is equivalent to |\n +==============+=====================+==============================================================+\n | ``arr[4:7]`` | ``arr(4 to 7)`` | ``[arr[4].value, arr[5].value, arr[6].value, arr[7].value]`` |\n 
+--------------+---------------------+--------------------------------------------------------------+\n | ``arr[7:4]`` | ``arr(7 downto 4)`` | ``[arr[7].value, arr[6].value, arr[5].value, arr[4].value]`` |\n +--------------+---------------------+--------------------------------------------------------------+\n\n When setting the signal as in ``arr.value = ...``, the same index equivalence as noted in the table holds.\n\n .. warning::\n Assigning a value to a sub-handle:\n\n - **Wrong**: ``dut.some_array.value[0] = 1`` (gets value as a list then updates index 0)\n - **Correct**: ``dut.some_array[0].value = 1``\n \"\"\"\n\n def __init__(self, handle, path):\n NonHierarchyObject.__init__(self, handle, path)\n self._range = self._handle.get_range()\n\n def __setitem__(self, index, value):\n \"\"\"Provide transparent assignment to indexed array handles.\"\"\"\n self[index].value = value\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n raise IndexError(\"Slice indexing is not supported\")\n if self._range is None:\n raise IndexError(\"%s is not indexable. Unable to get object at index %d\" % (self._fullname, index))\n if index in self._sub_handles:\n return self._sub_handles[index]\n new_handle = self._handle.get_handle_by_index(index)\n if not new_handle:\n raise IndexError(\"%s contains no object at index %d\" % (self._fullname, index))\n path = self._path + \"[\" + str(index) + \"]\"\n self._sub_handles[index] = SimHandle(new_handle, path)\n return self._sub_handles[index]\n\n def __iter__(self):\n if self._range is None:\n return\n\n self._log.debug(\"Iterating with range [%d:%d]\", self._range[0], self._range[1])\n for i in self._range_iter(self._range[0], self._range[1]):\n try:\n result = self[i]\n yield result\n except IndexError:\n continue\n\n def _range_iter(self, left, right):\n if left > right:\n while left >= right:\n yield left\n left = left - 1\n else:\n while left <= right:\n yield left\n left = left + 1\n\n @NonHierarchyObject.value.getter\n def value(self) -> list:\n # Don't use self.__iter__, because it has an unwanted `except IndexError`\n return [\n self[i].value\n for i in self._range_iter(self._range[0], self._range[1])\n ]\n\n def _set_value(self, value, call_sim):\n \"\"\"Assign value from a list of same length to an array in left-to-right order.\n Index 0 of the list maps to the left-most index in the array.\n\n See the docstring for this class.\n \"\"\"\n if type(value) is not list:\n raise TypeError(\"Assigning non-list value to object %s of type %s\" % (self._name, type(self)))\n if len(value) != len(self):\n raise ValueError(\"Assigning list of length %d to object %s of length %d\" % (\n len(value), self._name, len(self)))\n for val_idx, self_idx in enumerate(self._range_iter(self._range[0], self._range[1])):\n self[self_idx]._set_value(value[val_idx], call_sim)\n\n\nclass NonConstantObject(NonHierarchyIndexableObject):\n \"\"\" A non-constant object\"\"\"\n # FIXME: what is the difference to ModifiableObject? 
Explain in docstring.\n\n def drivers(self):\n \"\"\"An iterator for gathering all drivers for a signal.\"\"\"\n return self._handle.iterate(simulator.DRIVERS)\n\n def loads(self):\n \"\"\"An iterator for gathering all loads on a signal.\"\"\"\n return self._handle.iterate(simulator.LOADS)\n\n\nclass _SetAction:\n \"\"\"Base class representing the type of action used while write-accessing a handle.\"\"\"\n pass\n\n\nclass _SetValueAction(_SetAction):\n __slots__ = (\"value\",)\n \"\"\"Base class representing the type of action used while write-accessing a handle with a value.\"\"\"\n\n def __init__(self, value):\n self.value = value\n\n\nclass Deposit(_SetValueAction):\n \"\"\"Action used for placing a value into a given handle.\"\"\"\n\n def _as_gpi_args_for(self, hdl):\n return self.value, 0 # GPI_DEPOSIT\n\n\nclass Force(_SetValueAction):\n \"\"\"Action used to force a handle to a given value until a release is applied.\"\"\"\n\n def _as_gpi_args_for(self, hdl):\n return self.value, 1 # GPI_FORCE\n\n\nclass Freeze(_SetAction):\n \"\"\"Action used to make a handle keep its current value until a release is used.\"\"\"\n\n def _as_gpi_args_for(self, hdl):\n return hdl.value, 1 # GPI_FORCE\n\n\nclass Release(_SetAction):\n \"\"\"Action used to stop the effects of a previously applied force/freeze action.\"\"\"\n\n def _as_gpi_args_for(self, hdl):\n return 0, 2 # GPI_RELEASE\n\n\nclass ModifiableObject(NonConstantObject):\n \"\"\"Base class for simulator objects whose values can be modified.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to *value*.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. net, signal or variable.\n\n We determine the library call to make based on the type of the value\n because assigning integers less than 32 bits is faster.\n\n Args:\n value (ctypes.Structure, cocotb.binary.BinaryValue, int, double):\n The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target is not wide enough or has an unsupported type\n for value assignment.\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n if isinstance(value, int) and value < 0x7fffffff and len(self) <= 32:\n call_sim(self, self._handle.set_signal_val_long, set_action, value)\n return\n if isinstance(value, ctypes.Structure):\n value = BinaryValue(value=cocotb.utils.pack(value), n_bits=len(self))\n elif isinstance(value, int):\n value = BinaryValue(value=value, n_bits=len(self), bigEndian=False)\n elif isinstance(value, dict):\n # We're given a dictionary with a list of values and a bit size...\n num = 0\n vallist = list(value[\"values\"])\n vallist.reverse()\n if len(vallist) * value[\"bits\"] != len(self):\n raise TypeError(\"Unable to set with array length %d of %d bit entries = %d total, target is only %d bits long\" %\n (len(value[\"values\"]), value[\"bits\"], len(value[\"values\"]) * value[\"bits\"], len(self)))\n\n for val in vallist:\n num = (num << value[\"bits\"]) + val\n value = BinaryValue(value=num, n_bits=len(self), bigEndian=False)\n\n elif not isinstance(value, BinaryValue):\n raise TypeError(\n \"Unsupported type for value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_binstr, set_action, value.binstr)\n\n def _check_for_set_action(self, value):\n if not isinstance(value, _SetAction):\n return value, 0 # GPI_DEPOSIT\n return value._as_gpi_args_for(self)\n\n @NonConstantObject.value.getter\n def value(self) -> 
BinaryValue:\n binstr = self._handle.get_signal_val_binstr()\n result = BinaryValue(binstr, len(binstr))\n return result\n\n def __int__(self):\n return int(self.value)\n\n def _emit_str_warning(self):\n warnings.warn(\n \"`str({t})` is deprecated, and in future will return `{t}._path`. \"\n \"To get a string representation of the value, use `str({t}.value)`.\"\n .format(t=type(self).__qualname__),\n FutureWarning, stacklevel=3)\n\n def __str__(self):\n self._emit_str_warning()\n return str(self.value)\n\n\nclass RealObject(ModifiableObject):\n \"\"\"Specific object handle for Real signals and variables.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to value.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. net, signal or variable.\n\n Args:\n value (float): The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target has an unsupported type for\n real value assignment.\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n try:\n value = float(value)\n except ValueError:\n raise TypeError(\n \"Unsupported type for real value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_real, set_action, value)\n\n @ModifiableObject.value.getter\n def value(self) -> float:\n return self._handle.get_signal_val_real()\n\n def __float__(self):\n return float(self.value)\n\n\nclass EnumObject(ModifiableObject):\n \"\"\"Specific object handle for enumeration signals and variables.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to *value*.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. net, signal or variable.\n\n Args:\n value (int): The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target has an unsupported type for\n integer value assignment.\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n if isinstance(value, BinaryValue):\n value = int(value)\n elif not isinstance(value, int):\n raise TypeError(\n \"Unsupported type for enum value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_long, set_action, value)\n\n @ModifiableObject.value.getter\n def value(self) -> int:\n return self._handle.get_signal_val_long()\n\n\nclass IntegerObject(ModifiableObject):\n \"\"\"Specific object handle for Integer and Enum signals and variables.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to *value*.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. 
net, signal or variable.\n\n Args:\n value (int): The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target has an unsupported type for\n integer value assignment.\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n if isinstance(value, BinaryValue):\n value = int(value)\n elif not isinstance(value, int):\n raise TypeError(\n \"Unsupported type for integer value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_long, set_action, value)\n\n @ModifiableObject.value.getter\n def value(self) -> int:\n return self._handle.get_signal_val_long()\n\n\nclass StringObject(ModifiableObject):\n \"\"\"Specific object handle for String variables.\"\"\"\n\n def _set_value(self, value, call_sim):\n \"\"\"Set the value of the underlying simulation object to *value*.\n\n This operation will fail unless the handle refers to a modifiable\n object, e.g. net, signal or variable.\n\n Args:\n value (bytes): The value to drive onto the simulator object.\n\n Raises:\n TypeError: If target has an unsupported type for\n string value assignment.\n\n .. versionchanged:: 1.4\n Takes :class:`bytes` instead of :class:`str`.\n Users are now expected to choose an encoding when using these objects.\n As a convenience, when assigning :class:`str` values, ASCII encoding will be used as a safe default.\n\n \"\"\"\n value, set_action = self._check_for_set_action(value)\n\n if isinstance(value, str):\n warnings.warn(\n \"Handles on string objects will soon not accept `str` objects. \"\n \"Please use a bytes object by encoding the string as you see fit. \"\n \"`str.encode('ascii')` is typically sufficient.\", DeprecationWarning, stacklevel=2)\n value = value.encode('ascii') # may throw UnicodeEncodeError\n\n if not isinstance(value, bytes):\n raise TypeError(\n \"Unsupported type for string value assignment: {} ({!r})\"\n .format(type(value), value))\n\n call_sim(self, self._handle.set_signal_val_str, set_action, value)\n\n @ModifiableObject.value.getter\n def value(self) -> bytes:\n return self._handle.get_signal_val_str()\n\n def _emit_str_warning(self):\n warnings.warn(\n \"`str({t})` is deprecated, and in future will return `{t}._path`. 
\"\n \"To access the `bytes` value of this handle, use `{t}.value`.\"\n .format(t=type(self).__qualname__),\n FutureWarning, stacklevel=3)\n\n def __str__(self):\n self._emit_str_warning()\n return self.value.decode('ascii')\n\n\n_handle2obj = {}\n\n\ndef SimHandle(handle, path=None):\n \"\"\"Factory function to create the correct type of `SimHandle` object.\n\n Args:\n handle (int): The GPI handle to the simulator object.\n path (str): Path to this handle, ``None`` if root.\n\n Returns:\n The `SimHandle` object.\n\n Raises:\n TestError: If no matching object for GPI type could be found.\n \"\"\"\n _type2cls = {\n simulator.MODULE: HierarchyObject,\n simulator.STRUCTURE: HierarchyObject,\n simulator.REG: ModifiableObject,\n simulator.NET: ModifiableObject,\n simulator.NETARRAY: NonHierarchyIndexableObject,\n simulator.REAL: RealObject,\n simulator.INTEGER: IntegerObject,\n simulator.ENUM: EnumObject,\n simulator.STRING: StringObject,\n simulator.GENARRAY: HierarchyArrayObject,\n }\n\n # Enforce singletons since it's possible to retrieve handles avoiding\n # the hierarchy by getting driver/load information\n global _handle2obj\n try:\n return _handle2obj[handle]\n except KeyError:\n pass\n\n t = handle.get_type()\n\n # Special case for constants\n if handle.get_const() and t not in [\n simulator.MODULE,\n simulator.STRUCTURE,\n simulator.NETARRAY,\n simulator.GENARRAY,\n ]:\n obj = ConstantObject(handle, path, t)\n _handle2obj[handle] = obj\n return obj\n\n if t not in _type2cls:\n raise TestError(\"Couldn't find a matching object for GPI type %d (path=%s)\" % (t, path))\n obj = _type2cls[t](handle, path)\n _handle2obj[handle] = obj\n return obj\n", "path": "cocotb/handle.py" } ]
diff --git a/cocotb/handle.py b/cocotb/handle.py index 0e1e08145c..5225e24206 100755 --- a/cocotb/handle.py +++ b/cocotb/handle.py @@ -249,6 +249,10 @@ def __get_sub_handle_by_name(self, name): except KeyError: pass + if not self._discovered: + self._discover_all() + return self.__get_sub_handle_by_name(name) + # Cache to avoid a call to the simulator if we already know the name is # invalid. Unclear if we care, but we had this before. if name in self._invalid_sub_handles: diff --git a/cocotb/share/lib/vpi/VpiCbHdl.cpp b/cocotb/share/lib/vpi/VpiCbHdl.cpp index 61ffb1b1fd..454457cad0 100644 --- a/cocotb/share/lib/vpi/VpiCbHdl.cpp +++ b/cocotb/share/lib/vpi/VpiCbHdl.cpp @@ -576,7 +576,7 @@ decltype(VpiIterator::iterate_over) VpiIterator::iterate_over = []{ vpiRealNet, vpiStructVar, vpiStructNet, - //vpiVariables // Aldec SEGV on plain Verilog + vpiVariables, vpiNamedEvent, vpiNamedEventArray, vpiParameter, diff --git a/documentation/source/newsfragments/2079.bugfix.rst b/documentation/source/newsfragments/2079.bugfix.rst new file mode 100644 index 0000000000..ba12193b4d --- /dev/null +++ b/documentation/source/newsfragments/2079.bugfix.rst @@ -0,0 +1,8 @@ +Generate blocks are now accessible directly via lookup without having to iterate over parent handle. (:pr:`2079`) + + .. code-block:: python3 + + # Example pseudo-region + dut.genblk1 #<class 'cocotb.handle.HierarchyArrayObject'> + + .. consume the towncrier issue number on this line. diff --git a/tests/designs/sample_module/sample_module.sv b/tests/designs/sample_module/sample_module.sv index 914e7b7d42..7823a343da 100644 --- a/tests/designs/sample_module/sample_module.sv +++ b/tests/designs/sample_module/sample_module.sv @@ -98,8 +98,8 @@ initial begin $dumpvars(0,sample_module); end -reg[3:0] temp; parameter NUM_OF_MODULES = 4; +reg[NUM_OF_MODULES-1:0] temp; genvar idx; generate for (idx = 0; idx < NUM_OF_MODULES; idx=idx+1) begin diff --git a/tests/designs/sample_module/sample_module.vhdl b/tests/designs/sample_module/sample_module.vhdl index 394ccb1abe..bf943165e3 100644 --- a/tests/designs/sample_module/sample_module.vhdl +++ b/tests/designs/sample_module/sample_module.vhdl @@ -107,8 +107,20 @@ architecture impl of sample_module is type twoDimArrayType is array (natural range <>) of unsignedArrayType(31 downto 28); signal array_2d : twoDimArrayType(0 to 1); + constant NUM_OF_MODULES : natural := 4; + signal temp : std_logic_vector(NUM_OF_MODULES-1 downto 0); + begin + genblk1: for i in NUM_OF_MODULES - 1 downto 0 generate + begin + process (clk) begin + if rising_edge(clk) then + temp(i) <= '0'; + end if; + end process; + end generate; + process (clk) begin if rising_edge(clk) then stream_out_data_registered <= stream_in_data; diff --git a/tests/test_cases/test_array/test_array.py b/tests/test_cases/test_array/test_array.py index 04d9b6bdff..35f03650a3 100644 --- a/tests/test_cases/test_array/test_array.py +++ b/tests/test_cases/test_array/test_array.py @@ -336,7 +336,7 @@ def test_discover_all(dut): # Modelsim/Questa VPI will not find a vpiStructVar from vpiModule so we set a dummy variable # to ensure the handle is in the dut "sub_handles" for iterating # - # DO NOT ADD FOR ALDEC. Does not iterate over properly + # DO NOT ADD FOR ALDEC. 
Older Versions do not iterate over properly if cocotb.LANGUAGE in ["verilog"] and cocotb.SIM_NAME.lower().startswith(("modelsim", "ncsim", "xmsim")): dummy = dut.sig_rec dummy = dut.port_rec_out @@ -353,12 +353,16 @@ def test_discover_all(dut): elif cocotb.LANGUAGE in ["vhdl"]: pass_total = 856 elif cocotb.LANGUAGE in ["verilog"] and cocotb.SIM_NAME.lower().startswith(("riviera")): + # Numbers for versions before 2019.10 may be outdated if cocotb.SIM_VERSION.startswith(("2017.10.61")): pass_total = 803 elif cocotb.SIM_VERSION.startswith(("2016.06", "2016.10", "2017.02")): pass_total = 813 - elif cocotb.SIM_VERSION.startswith(("2016.02", "2019.10")): + elif cocotb.SIM_VERSION.startswith(("2016.02")): pass_total = 947 + elif cocotb.SIM_VERSION.startswith(("2019.10")): + # vpiVariables finds port_rec_out and sig_rec + pass_total = 1006 else: pass_total = 1038 else: diff --git a/tests/test_cases/test_discovery/test_discovery.py b/tests/test_cases/test_discovery/test_discovery.py index eb059d519e..b8d3070589 100644 --- a/tests/test_cases/test_discovery/test_discovery.py +++ b/tests/test_cases/test_discovery/test_discovery.py @@ -34,6 +34,20 @@ from cocotb.handle import IntegerObject, ConstantObject, HierarchyObject, StringObject [email protected]() +async def pseudo_region_access(dut): + """Test that pseudo-regions are accessible before iteration""" + + # Ensure pseudo-region lookup will fail + if len(dut._sub_handles) != 0: + dut._sub_handles = {} + + pseudo_region = dut.genblk1 + dut._log.info("Found %s (%s)", pseudo_region._name, type(pseudo_region)) + first_generate_instance = pseudo_region[0] + dut._log.info("Found %s (%s)", first_generate_instance._name, type(first_generate_instance)) + + @cocotb.test() def recursive_discover(dut): """Discover absolutely everything in the DUT""" diff --git a/tests/test_cases/test_iteration_mixedlang/test_iteration.py b/tests/test_cases/test_iteration_mixedlang/test_iteration.py index 27ae1be14b..dced184dc1 100644 --- a/tests/test_cases/test_iteration_mixedlang/test_iteration.py +++ b/tests/test_cases/test_iteration_mixedlang/test_iteration.py @@ -71,7 +71,7 @@ def recursive_discovery(dut): elif cocotb.SIM_NAME.lower().startswith(("modelsim")): pass_total = 933 else: - pass_total = 966 + pass_total = 1024 tlog = logging.getLogger("cocotb.test") yield Timer(100)
Hierarchy access for generate statement

When we instantiate a block with a generate statement, how can we access that hierarchical module? For example, suppose I want to access "SubBlock[3].u_SubModule.a":

    genvar i;
    generate
        for (i = 0; i < 5; i = i + 1) begin : SubBlock
            SubModule u_SubModule(
                .a(...),
                .b(...),
            );
        end
    endgenerate

First, I tried RisingEdge(dut.SubBlock[3].u_SubModule.a) -> cocotb can't find SubBlock. Second, I tried RisingEdge(dut.u_SubModule.a) -> cocotb finds an object, but which module is being accessed? I instantiated five modules.
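A minimal sketch of the access pattern the fix above enables, assuming a DUT containing the generate loop from this issue; the names SubBlock and u_SubModule come from the report, not from cocotb itself:

    import cocotb
    from cocotb.triggers import RisingEdge

    @cocotb.test()
    async def access_generate_block(dut):
        # With the lazy-discovery fallback added in __get_sub_handle_by_name,
        # the pseudo-region resolves by name without iterating the parent first.
        block = dut.SubBlock[3]        # fourth generate instance
        sub = block.u_SubModule        # module instantiated inside that block
        await RisingEdge(sub.a)        # wait on its signal directly
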
hylang__hy-92
[ { "content": "# -*- encoding: utf-8 -*-\n#\n# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n# Copyright (c) 2013 Julien Danjou <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.errors import HyError\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.integer import HyInteger\nfrom hy.models.string import HyString\nfrom hy.models.symbol import HySymbol\nfrom hy.models.list import HyList\nfrom hy.models.dict import HyDict\n\nfrom hy.util import flatten_literal_list\n\nimport codecs\nimport ast\nimport sys\n\n\nclass HyCompileError(HyError):\n pass\n\n\n_compile_table = {}\n\n\ndef ast_str(foobar):\n if sys.version_info[0] >= 3:\n return str(foobar)\n\n try:\n return str(foobar)\n except UnicodeEncodeError:\n pass\n\n enc = codecs.getencoder('punycode')\n foobar, _ = enc(foobar)\n return \"__hy_%s\" % (str(foobar).replace(\"-\", \"_\"))\n\n\ndef builds(_type):\n def _dec(fn):\n _compile_table[_type] = fn\n\n def shim(*args, **kwargs):\n return fn(*args, **kwargs)\n return shim\n return _dec\n\n\ndef _raise_wrong_args_number(expression, error):\n err = TypeError(error % (expression.pop(0),\n len(expression)))\n err.start_line = expression.start_line\n err.start_column = expression.start_column\n raise err\n\n\ndef checkargs(exact=None, min=None, max=None):\n def _dec(fn):\n def checker(self, expression):\n if exact is not None and (len(expression) - 1) != exact:\n _raise_wrong_args_number(expression,\n \"`%%s' needs %d arguments, got %%d\" %\n exact)\n\n if min is not None and (len(expression) - 1) < min:\n _raise_wrong_args_number(\n expression,\n \"`%%s' needs at least %d arguments, got %%d\" % (min))\n\n if max is not None and (len(expression) - 1) > max:\n _raise_wrong_args_number(\n expression,\n \"`%%s' needs at most %d arguments, got %%d\" % (max))\n\n return fn(self, expression)\n\n return checker\n return _dec\n\n\nclass HyASTCompiler(object):\n\n def __init__(self):\n self.returnable = False\n self.anon_fn_count = 0\n\n def compile(self, tree):\n try:\n for _type in _compile_table:\n if type(tree) == _type:\n return _compile_table[_type](self, tree)\n except Exception as e:\n err = HyCompileError(str(e))\n err.exception = e\n err.start_line = getattr(e, \"start_line\", None)\n err.start_column = getattr(e, \"start_column\", None)\n raise err\n\n raise HyCompileError(\"Unknown type - `%s'\" % (str(type(tree))))\n\n def _mangle_branch(self, tree, start_line, start_column):\n # If tree is empty, just return a pass 
statement\n if tree == []:\n return [ast.Pass(lineno=start_line,\n col_offset=start_column)]\n\n ret = []\n tree = list(flatten_literal_list(tree))\n tree.reverse()\n\n if self.returnable and len(tree) > 0:\n el = tree[0]\n if not isinstance(el, ast.stmt):\n el = tree.pop(0)\n ret.append(ast.Return(value=el,\n lineno=el.lineno,\n col_offset=el.col_offset))\n if isinstance(el, ast.FunctionDef):\n ret.append(ast.Return(\n value=ast.Name(\n arg=el.name, id=el.name, ctx=ast.Load(),\n lineno=el.lineno, col_offset=el.col_offset),\n lineno=el.lineno, col_offset=el.col_offset))\n\n for el in tree:\n if isinstance(el, ast.stmt):\n ret.append(el)\n continue\n\n ret.append(ast.Expr(value=el,\n lineno=el.lineno,\n col_offset=el.col_offset))\n\n ret.reverse()\n return ret\n\n @builds(list)\n def compile_raw_list(self, entries):\n return [self.compile(x) for x in entries]\n\n @builds(\"do\")\n @builds(\"progn\")\n def compile_do_expression(self, expr):\n return [self.compile(x) for x in expr[1:]]\n\n @builds(\"throw\")\n @builds(\"raise\")\n @checkargs(min=1)\n def compile_throw_expression(self, expr):\n expr.pop(0)\n exc = self.compile(expr.pop(0))\n return ast.Raise(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n type=exc,\n exc=exc,\n inst=None,\n tback=None)\n\n @builds(\"try\")\n def compile_try_expression(self, expr):\n expr.pop(0) # try\n\n if sys.version_info[0] >= 3 and sys.version_info[1] >= 3:\n # Python 3.3 features a rename of TryExcept to Try.\n Try = ast.Try\n else:\n Try = ast.TryExcept\n\n try:\n body = expr.pop(0)\n except IndexError:\n body = []\n\n # (try something…)\n body = self._code_branch(self.compile(body),\n expr.start_line,\n expr.start_column)\n\n if len(expr) == 0:\n # (try) or (try body)\n handlers = [ast.ExceptHandler(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n type=None,\n name=None,\n body=[ast.Pass(lineno=expr.start_line,\n col_offset=expr.start_column)])]\n else:\n # (try body except except…)\n handlers = [self.compile(s) for s in expr]\n\n return Try(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n body=body,\n handlers=handlers,\n finalbody=[],\n orelse=[])\n\n @builds(\"catch\")\n @builds(\"except\")\n def compile_catch_expression(self, expr):\n catch = expr.pop(0) # catch\n\n try:\n exceptions = expr.pop(0)\n except IndexError:\n exceptions = HyList()\n # exceptions catch should be either:\n # [[list of exceptions]]\n # or\n # [variable [list of exceptions]]\n # or\n # [variable exception]\n # or\n # [exception]\n # or\n # []\n if not isinstance(exceptions, HyList):\n raise TypeError(\"`%s' exceptions list is not a list\" % catch)\n if len(exceptions) > 2:\n raise TypeError(\"`%s' exceptions list is too long\" % catch)\n\n # [variable [list of exceptions]]\n # let's pop variable and use it as name\n if len(exceptions) == 2:\n name = exceptions.pop(0)\n if sys.version_info[0] >= 3:\n # Python3 features a change where the Exception handler\n # moved the name from a Name() to a pure Python String type.\n #\n # We'll just make sure it's a pure \"string\", and let it work\n # it's magic.\n name = ast_str(name)\n else:\n # Python2 requires an ast.Name, set to ctx Store.\n name = self._storeize(self.compile(name))\n else:\n name = None\n\n try:\n exceptions_list = exceptions.pop(0)\n except IndexError:\n exceptions_list = []\n\n if isinstance(exceptions_list, list):\n if len(exceptions_list):\n # [FooBar BarFoo] → catch Foobar and BarFoo exceptions\n _type = ast.Tuple(elts=[self.compile(x)\n for x in exceptions_list],\n 
lineno=expr.start_line,\n col_offset=expr.start_column,\n ctx=ast.Load())\n else:\n # [] → all exceptions catched\n _type = None\n elif isinstance(exceptions_list, HySymbol):\n _type = self.compile(exceptions_list)\n else:\n raise TypeError(\"`%s' needs a valid exception list\" % catch)\n\n body = self._code_branch([self.compile(x) for x in expr],\n expr.start_line,\n expr.start_column)\n\n return ast.ExceptHandler(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n type=_type,\n name=name,\n body=body)\n\n def _code_branch(self, branch, start_line, start_column):\n return self._mangle_branch((branch\n if isinstance(branch, list)\n else [branch]),\n start_line,\n start_column)\n\n @builds(\"if\")\n @checkargs(min=2, max=3)\n def compile_if_expression(self, expr):\n expr.pop(0) # if\n test = self.compile(expr.pop(0))\n body = self._code_branch(self.compile(expr.pop(0)),\n expr.start_line,\n expr.start_column)\n\n if len(expr) == 1:\n orel = self._code_branch(self.compile(expr.pop(0)),\n expr.start_line,\n expr.start_column)\n else:\n orel = []\n\n return ast.If(test=test,\n body=body,\n orelse=orel,\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n @builds(\"print\")\n def compile_print_expression(self, expr):\n call = expr.pop(0) # print\n if sys.version_info[0] >= 3:\n call = self.compile(call)\n # AST changed with Python 3, we now just call it.\n return ast.Call(\n keywords=[],\n func=call,\n args=[self.compile(x) for x in expr],\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n return ast.Print(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n dest=None,\n values=[self.compile(x) for x in expr],\n nl=True)\n\n @builds(\"assert\")\n @checkargs(1)\n def compile_assert_expression(self, expr):\n expr.pop(0) # assert\n e = expr.pop(0)\n return ast.Assert(test=self.compile(e),\n msg=None,\n lineno=e.start_line,\n col_offset=e.start_column)\n\n @builds(\"lambda\")\n @checkargs(min=2)\n def compile_lambda_expression(self, expr):\n expr.pop(0)\n sig = expr.pop(0)\n body = expr.pop(0)\n # assert expr is empty\n return ast.Lambda(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n args=ast.arguments(args=[\n ast.Name(arg=ast_str(x), id=ast_str(x),\n ctx=ast.Param(),\n lineno=x.start_line,\n col_offset=x.start_column)\n for x in sig],\n vararg=None,\n kwarg=None,\n defaults=[],\n kwonlyargs=[],\n kw_defaults=[]),\n body=self.compile(body))\n\n @builds(\"pass\")\n @checkargs(0)\n def compile_pass_expression(self, expr):\n return ast.Pass(lineno=expr.start_line, col_offset=expr.start_column)\n\n @builds(\"yield\")\n @checkargs(1)\n def compile_yield_expression(self, expr):\n expr.pop(0)\n return ast.Yield(\n value=self.compile(expr.pop(0)),\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n @builds(\"import\")\n def compile_import_expression(self, expr):\n expr.pop(0) # index\n return ast.Import(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n names=[ast.alias(name=ast_str(x), asname=None) for x in expr])\n\n @builds(\"import_as\")\n def compile_import_as_expression(self, expr):\n expr.pop(0) # index\n modlist = [expr[i:i + 2] for i in range(0, len(expr), 2)]\n return ast.Import(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n module=ast_str(expr.pop(0)),\n names=[ast.alias(name=ast_str(x[0]),\n asname=ast_str(x[1])) for x in modlist])\n\n @builds(\"import_from\")\n @checkargs(min=1)\n def compile_import_from_expression(self, expr):\n expr.pop(0) # index\n return ast.ImportFrom(\n lineno=expr.start_line,\n 
col_offset=expr.start_column,\n module=ast_str(expr.pop(0)),\n names=[ast.alias(name=ast_str(x), asname=None) for x in expr],\n level=0)\n\n @builds(\"get\")\n @checkargs(2)\n def compile_index_expression(self, expr):\n expr.pop(0) # index\n val = self.compile(expr.pop(0)) # target\n sli = self.compile(expr.pop(0)) # slice\n\n return ast.Subscript(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n value=val,\n slice=ast.Index(value=sli),\n ctx=ast.Load())\n\n @builds(\"slice\")\n @checkargs(min=1, max=3)\n def compile_slice_expression(self, expr):\n expr.pop(0) # index\n val = self.compile(expr.pop(0)) # target\n\n low = None\n if expr != []:\n low = self.compile(expr.pop(0))\n\n high = None\n if expr != []:\n high = self.compile(expr.pop(0))\n\n return ast.Subscript(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n value=val,\n slice=ast.Slice(lower=low,\n upper=high,\n step=None),\n ctx=ast.Load())\n\n @builds(\"assoc\")\n @checkargs(3)\n def compile_assoc_expression(self, expr):\n expr.pop(0) # assoc\n # (assoc foo bar baz) => foo[bar] = baz\n target = expr.pop(0)\n key = expr.pop(0)\n val = expr.pop(0)\n\n return ast.Assign(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n targets=[\n ast.Subscript(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n value=self.compile(target),\n slice=ast.Index(value=self.compile(key)),\n ctx=ast.Store())],\n value=self.compile(val))\n\n @builds(\"decorate_with\")\n @checkargs(min=1)\n def compile_decorate_expression(self, expr):\n expr.pop(0) # decorate-with\n fn = self.compile(expr.pop(-1))\n if type(fn) != ast.FunctionDef:\n raise TypeError(\"Decorated a non-function\")\n fn.decorator_list = [self.compile(x) for x in expr]\n return fn\n\n @builds(\"with\")\n @checkargs(min=2)\n def compile_with_expression(self, expr):\n expr.pop(0) # with\n\n args = expr.pop(0)\n if len(args) > 2 or len(args) < 1:\n raise TypeError(\"with needs [arg (expr)] or [(expr)]\")\n\n args.reverse()\n ctx = self.compile(args.pop(0))\n\n thing = None\n if args != []:\n thing = self._storeize(self.compile(args.pop(0)))\n\n ret = ast.With(context_expr=ctx,\n lineno=expr.start_line,\n col_offset=expr.start_column,\n optional_vars=thing,\n body=self._code_branch(\n [self.compile(x) for x in expr],\n expr.start_line,\n expr.start_column))\n\n if sys.version_info[0] >= 3 and sys.version_info[1] >= 3:\n ret.items = [ast.withitem(context_expr=ctx, optional_vars=thing)]\n\n return ret\n\n @builds(\",\")\n def compile_tuple(self, expr):\n expr.pop(0)\n return ast.Tuple(elts=[self.compile(x) for x in expr],\n lineno=expr.start_line,\n col_offset=expr.start_column,\n ctx=ast.Load())\n\n @builds(\"list_comp\")\n @checkargs(min=2, max=3)\n def compile_list_comprehension(self, expr):\n # (list-comp expr (target iter) cond?)\n expr.pop(0)\n expression = expr.pop(0)\n tar_it = iter(expr.pop(0))\n targets = zip(tar_it, tar_it)\n\n cond = self.compile(expr.pop(0)) if expr != [] else None\n\n ret = ast.ListComp(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n elt=self.compile(expression),\n generators=[])\n\n for target, iterable in targets:\n ret.generators.append(ast.comprehension(\n target=self._storeize(self.compile(target)),\n iter=self.compile(iterable),\n ifs=[]))\n\n if cond:\n ret.generators[-1].ifs.append(cond)\n\n return ret\n\n def _storeize(self, name):\n if isinstance(name, ast.Tuple):\n for x in name.elts:\n x.ctx = ast.Store()\n name.ctx = ast.Store()\n return name\n\n @builds(\"kwapply\")\n @checkargs(2)\n def 
compile_kwapply_expression(self, expr):\n expr.pop(0) # kwapply\n call = self.compile(expr.pop(0))\n kwargs = expr.pop(0)\n\n if type(call) != ast.Call:\n raise TypeError(\"kwapplying a non-call\")\n\n call.keywords = [ast.keyword(arg=ast_str(x),\n value=self.compile(kwargs[x])) for x in kwargs]\n\n return call\n\n @builds(\"not\")\n @builds(\"~\")\n @checkargs(1)\n def compile_unary_operator(self, expression):\n ops = {\"not\": ast.Not,\n \"~\": ast.Invert}\n operator = expression.pop(0)\n operand = expression.pop(0)\n return ast.UnaryOp(op=ops[operator](),\n operand=self.compile(operand),\n lineno=operator.start_line,\n col_offset=operator.start_column)\n\n @builds(\"and\")\n @builds(\"or\")\n @checkargs(min=2)\n def compile_logical_or_and_and_operator(self, expression):\n ops = {\"and\": ast.And,\n \"or\": ast.Or}\n operator = expression.pop(0)\n values = []\n for child in expression:\n values.append(self.compile(child))\n return ast.BoolOp(op=ops[operator](),\n lineno=operator.start_line,\n col_offset=operator.start_column,\n values=values)\n\n @builds(\"=\")\n @builds(\"!=\")\n @builds(\"<\")\n @builds(\"<=\")\n @builds(\">\")\n @builds(\">=\")\n @builds(\"is\")\n @builds(\"in\")\n @builds(\"is_not\")\n @builds(\"not_in\")\n @checkargs(min=2)\n def compile_compare_op_expression(self, expression):\n ops = {\"=\": ast.Eq, \"!=\": ast.NotEq,\n \"<\": ast.Lt, \"<=\": ast.LtE,\n \">\": ast.Gt, \">=\": ast.GtE,\n \"is\": ast.Is, \"is_not\": ast.IsNot,\n \"in\": ast.In, \"not_in\": ast.NotIn}\n\n inv = expression.pop(0)\n op = ops[inv]\n ops = [op() for x in range(1, len(expression))]\n e = expression.pop(0)\n\n return ast.Compare(left=self.compile(e),\n ops=ops,\n comparators=[self.compile(x) for x in expression],\n lineno=e.start_line,\n col_offset=e.start_column)\n\n @builds(\"+\")\n @builds(\"%\")\n @builds(\"-\")\n @builds(\"/\")\n @builds(\"*\")\n @checkargs(min=2)\n def compile_maths_expression(self, expression):\n # operator = Mod | Pow | LShift | RShift | BitOr |\n # BitXor | BitAnd | FloorDiv\n # (to implement list) XXX\n\n ops = {\"+\": ast.Add,\n \"/\": ast.Div,\n \"*\": ast.Mult,\n \"-\": ast.Sub,\n \"%\": ast.Mod}\n\n inv = expression.pop(0)\n op = ops[inv]\n\n left = self.compile(expression.pop(0))\n calc = None\n for child in expression:\n calc = ast.BinOp(left=left,\n op=op(),\n right=self.compile(child),\n lineno=child.start_line,\n col_offset=child.start_column)\n left = calc\n return calc\n\n def compile_dotted_expression(self, expr):\n ofn = expr.pop(0) # .join\n\n fn = HySymbol(ofn[1:])\n fn.replace(ofn)\n\n obj = expr.pop(0) # [1 2 3 4]\n\n return ast.Call(\n func=ast.Attribute(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n value=self.compile(obj),\n attr=ast_str(fn),\n ctx=ast.Load()),\n args=[self.compile(x) for x in expr],\n keywords=[],\n lineno=expr.start_line,\n col_offset=expr.start_column,\n starargs=None,\n kwargs=None)\n\n @builds(HyExpression)\n def compile_expression(self, expression):\n fn = expression[0]\n if isinstance(fn, HyString):\n if fn in _compile_table:\n return _compile_table[fn](self, expression)\n\n if expression[0].startswith(\".\"):\n return self.compile_dotted_expression(expression)\n\n return ast.Call(func=self.compile(fn),\n args=[self.compile(x) for x in expression[1:]],\n keywords=[],\n starargs=None,\n kwargs=None,\n lineno=expression.start_line,\n col_offset=expression.start_column)\n\n @builds(\"def\")\n @builds(\"setf\")\n @builds(\"setv\")\n @checkargs(2)\n def compile_def_expression(self, expression):\n 
expression.pop(0) # \"def\"\n name = expression.pop(0)\n\n what = self.compile(expression.pop(0))\n\n if type(what) == ast.FunctionDef:\n # We special case a FunctionDef, since we can define by setting\n # FunctionDef's .name attribute, rather then foo == anon_fn. This\n # helps keep things clean.\n what.name = ast_str(name)\n return what\n\n name = self._storeize(self.compile(name))\n\n return ast.Assign(\n lineno=expression.start_line,\n col_offset=expression.start_column,\n targets=[name], value=what)\n\n @builds(\"foreach\")\n @checkargs(min=1)\n def compile_for_expression(self, expression):\n ret_status = self.returnable\n self.returnable = False\n\n expression.pop(0) # for\n name, iterable = expression.pop(0)\n target = self._storeize(self.compile_symbol(name))\n\n ret = ast.For(lineno=expression.start_line,\n col_offset=expression.start_column,\n target=target,\n iter=self.compile(iterable),\n body=self._code_branch(\n [self.compile(x) for x in expression],\n expression.start_line,\n expression.start_column),\n orelse=[])\n\n self.returnable = ret_status\n return ret\n\n @builds(\"while\")\n @checkargs(min=2)\n def compile_while_expression(self, expr):\n expr.pop(0) # \"while\"\n test = self.compile(expr.pop(0))\n\n return ast.While(test=test,\n body=self._code_branch(\n [self.compile(x) for x in expr],\n expr.start_line,\n expr.start_column),\n orelse=[],\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n @builds(HyList)\n def compile_list(self, expr):\n return ast.List(\n elts=[self.compile(x) for x in expr],\n ctx=ast.Load(),\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n @builds(\"fn\")\n @checkargs(min=2)\n def compile_fn_expression(self, expression):\n expression.pop(0) # fn\n\n ret_status = self.returnable\n\n self.anon_fn_count += 1\n name = \"_hy_anon_fn_%d\" % (self.anon_fn_count)\n sig = expression.pop(0)\n\n body = []\n if expression != []:\n self.returnable = True\n tailop = self.compile(expression.pop(-1))\n self.returnable = False\n for el in expression:\n body.append(self.compile(el))\n body.append(tailop)\n\n self.returnable = True\n body = self._code_branch(body,\n expression.start_line,\n expression.start_column)\n\n ret = ast.FunctionDef(\n name=name,\n lineno=expression.start_line,\n col_offset=expression.start_column,\n args=ast.arguments(\n args=[\n ast.Name(\n arg=ast_str(x), id=ast_str(x),\n ctx=ast.Param(),\n lineno=x.start_line,\n col_offset=x.start_column)\n for x in sig],\n vararg=None,\n kwarg=None,\n kwonlyargs=[],\n kw_defaults=[],\n defaults=[]),\n body=body,\n decorator_list=[])\n\n self.returnable = ret_status\n return ret\n\n @builds(HyInteger)\n def compile_number(self, number):\n return ast.Num(n=int(number), # See HyInteger above.\n lineno=number.start_line,\n col_offset=number.start_column)\n\n @builds(HySymbol)\n def compile_symbol(self, symbol):\n if \".\" in symbol:\n glob, local = symbol.rsplit(\".\", 1)\n glob = HySymbol(glob)\n glob.replace(symbol)\n\n return ast.Attribute(\n lineno=symbol.start_line,\n col_offset=symbol.start_column,\n value=self.compile_symbol(glob),\n attr=ast_str(local),\n ctx=ast.Load()\n )\n\n return ast.Name(id=ast_str(symbol),\n arg=ast_str(symbol),\n ctx=ast.Load(),\n lineno=symbol.start_line,\n col_offset=symbol.start_column)\n\n @builds(HyString)\n def compile_string(self, string):\n return ast.Str(s=ast_str(string), lineno=string.start_line,\n col_offset=string.start_column)\n\n @builds(HyDict)\n def compile_dict(self, m):\n keys = []\n vals = []\n for entry in m:\n 
keys.append(self.compile(entry))\n vals.append(self.compile(m[entry]))\n\n return ast.Dict(\n lineno=m.start_line,\n col_offset=m.start_column,\n keys=keys,\n values=vals)\n\n\ndef hy_compile(tree, root=None):\n \" Compile a HyObject tree into a Python AST tree. \"\n compiler = HyASTCompiler()\n tlo = root\n if root is None:\n tlo = ast.Module\n ret = tlo(body=compiler._mangle_branch(compiler.compile(tree), 0, 0))\n return ret\n", "path": "hy/compiler.py" } ]
[ { "content": "# -*- encoding: utf-8 -*-\n#\n# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n# Copyright (c) 2013 Julien Danjou <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.errors import HyError\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.integer import HyInteger\nfrom hy.models.string import HyString\nfrom hy.models.symbol import HySymbol\nfrom hy.models.list import HyList\nfrom hy.models.dict import HyDict\n\nfrom hy.util import flatten_literal_list\n\nimport codecs\nimport ast\nimport sys\n\n\nclass HyCompileError(HyError):\n pass\n\n\n_compile_table = {}\n\n\ndef ast_str(foobar):\n if sys.version_info[0] >= 3:\n return str(foobar)\n\n try:\n return str(foobar)\n except UnicodeEncodeError:\n pass\n\n enc = codecs.getencoder('punycode')\n foobar, _ = enc(foobar)\n return \"__hy_%s\" % (str(foobar).replace(\"-\", \"_\"))\n\n\ndef builds(_type):\n def _dec(fn):\n _compile_table[_type] = fn\n\n def shim(*args, **kwargs):\n return fn(*args, **kwargs)\n return shim\n return _dec\n\n\ndef _raise_wrong_args_number(expression, error):\n err = TypeError(error % (expression.pop(0),\n len(expression)))\n err.start_line = expression.start_line\n err.start_column = expression.start_column\n raise err\n\n\ndef checkargs(exact=None, min=None, max=None):\n def _dec(fn):\n def checker(self, expression):\n if exact is not None and (len(expression) - 1) != exact:\n _raise_wrong_args_number(expression,\n \"`%%s' needs %d arguments, got %%d\" %\n exact)\n\n if min is not None and (len(expression) - 1) < min:\n _raise_wrong_args_number(\n expression,\n \"`%%s' needs at least %d arguments, got %%d\" % (min))\n\n if max is not None and (len(expression) - 1) > max:\n _raise_wrong_args_number(\n expression,\n \"`%%s' needs at most %d arguments, got %%d\" % (max))\n\n return fn(self, expression)\n\n return checker\n return _dec\n\n\nclass HyASTCompiler(object):\n\n def __init__(self):\n self.returnable = False\n self.anon_fn_count = 0\n\n def compile(self, tree):\n try:\n for _type in _compile_table:\n if type(tree) == _type:\n return _compile_table[_type](self, tree)\n except Exception as e:\n err = HyCompileError(str(e))\n err.exception = e\n err.start_line = getattr(e, \"start_line\", None)\n err.start_column = getattr(e, \"start_column\", None)\n raise err\n\n raise HyCompileError(\"Unknown type - `%s'\" % (str(type(tree))))\n\n def _mangle_branch(self, tree, start_line, start_column):\n # If tree is empty, just return a pass 
statement\n if tree == []:\n return [ast.Pass(lineno=start_line,\n col_offset=start_column)]\n\n ret = []\n tree = list(flatten_literal_list(tree))\n tree.reverse()\n\n if self.returnable and len(tree) > 0:\n el = tree[0]\n if not isinstance(el, ast.stmt):\n el = tree.pop(0)\n ret.append(ast.Return(value=el,\n lineno=el.lineno,\n col_offset=el.col_offset))\n if isinstance(el, ast.FunctionDef):\n ret.append(ast.Return(\n value=ast.Name(\n arg=el.name, id=el.name, ctx=ast.Load(),\n lineno=el.lineno, col_offset=el.col_offset),\n lineno=el.lineno, col_offset=el.col_offset))\n\n for el in tree:\n if isinstance(el, ast.stmt):\n ret.append(el)\n continue\n\n ret.append(ast.Expr(value=el,\n lineno=el.lineno,\n col_offset=el.col_offset))\n\n ret.reverse()\n return ret\n\n @builds(list)\n def compile_raw_list(self, entries):\n return [self.compile(x) for x in entries]\n\n @builds(\"do\")\n @builds(\"progn\")\n def compile_do_expression(self, expr):\n return [self.compile(x) for x in expr[1:]]\n\n @builds(\"throw\")\n @builds(\"raise\")\n @checkargs(max=1)\n def compile_throw_expression(self, expr):\n expr.pop(0)\n exc = self.compile(expr.pop(0)) if expr else None\n return ast.Raise(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n type=exc,\n exc=exc,\n inst=None,\n tback=None)\n\n @builds(\"try\")\n def compile_try_expression(self, expr):\n expr.pop(0) # try\n\n if sys.version_info[0] >= 3 and sys.version_info[1] >= 3:\n # Python 3.3 features a rename of TryExcept to Try.\n Try = ast.Try\n else:\n Try = ast.TryExcept\n\n try:\n body = expr.pop(0)\n except IndexError:\n body = []\n\n # (try something…)\n body = self._code_branch(self.compile(body),\n expr.start_line,\n expr.start_column)\n\n if len(expr) == 0:\n # (try) or (try body)\n handlers = [ast.ExceptHandler(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n type=None,\n name=None,\n body=[ast.Pass(lineno=expr.start_line,\n col_offset=expr.start_column)])]\n else:\n # (try body except except…)\n handlers = [self.compile(s) for s in expr]\n\n return Try(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n body=body,\n handlers=handlers,\n finalbody=[],\n orelse=[])\n\n @builds(\"catch\")\n @builds(\"except\")\n def compile_catch_expression(self, expr):\n expr.pop(0) # catch\n\n try:\n exceptions = expr.pop(0)\n except IndexError:\n exceptions = []\n # exceptions catch should be either:\n # [[list of exceptions]]\n # or\n # [variable [list of exceptions]]\n # or\n # [variable exception]\n # or\n # [exception]\n # or\n # []\n if len(exceptions) > 2:\n raise TypeError(\"`catch' exceptions list is too long\")\n\n # [variable [list of exceptions]]\n # let's pop variable and use it as name\n if len(exceptions) == 2:\n name = exceptions.pop(0)\n if sys.version_info[0] >= 3:\n # Python3 features a change where the Exception handler\n # moved the name from a Name() to a pure Python String type.\n #\n # We'll just make sure it's a pure \"string\", and let it work\n # it's magic.\n name = ast_str(name)\n else:\n # Python2 requires an ast.Name, set to ctx Store.\n name = self._storeize(self.compile(name))\n else:\n name = None\n\n try:\n exceptions_list = exceptions.pop(0)\n except IndexError:\n exceptions_list = []\n\n if isinstance(exceptions_list, list):\n if len(exceptions_list):\n # [FooBar BarFoo] → catch Foobar and BarFoo exceptions\n _type = ast.Tuple(elts=[self.compile(x)\n for x in exceptions_list],\n lineno=expr.start_line,\n col_offset=expr.start_column,\n ctx=ast.Load())\n else:\n # [] → all exceptions catched\n 
_type = None\n elif isinstance(exceptions_list, HySymbol):\n _type = self.compile(exceptions_list)\n else:\n raise TypeError(\"`catch' needs a valid exception list to catch\")\n\n body = self._code_branch([self.compile(x) for x in expr],\n expr.start_line,\n expr.start_column)\n\n return ast.ExceptHandler(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n type=_type,\n name=name,\n body=body)\n\n def _code_branch(self, branch, start_line, start_column):\n return self._mangle_branch((branch\n if isinstance(branch, list)\n else [branch]),\n start_line,\n start_column)\n\n @builds(\"if\")\n @checkargs(min=2, max=3)\n def compile_if_expression(self, expr):\n expr.pop(0) # if\n test = self.compile(expr.pop(0))\n body = self._code_branch(self.compile(expr.pop(0)),\n expr.start_line,\n expr.start_column)\n\n if len(expr) == 1:\n orel = self._code_branch(self.compile(expr.pop(0)),\n expr.start_line,\n expr.start_column)\n else:\n orel = []\n\n return ast.If(test=test,\n body=body,\n orelse=orel,\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n @builds(\"print\")\n def compile_print_expression(self, expr):\n call = expr.pop(0) # print\n if sys.version_info[0] >= 3:\n call = self.compile(call)\n # AST changed with Python 3, we now just call it.\n return ast.Call(\n keywords=[],\n func=call,\n args=[self.compile(x) for x in expr],\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n return ast.Print(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n dest=None,\n values=[self.compile(x) for x in expr],\n nl=True)\n\n @builds(\"assert\")\n @checkargs(1)\n def compile_assert_expression(self, expr):\n expr.pop(0) # assert\n e = expr.pop(0)\n return ast.Assert(test=self.compile(e),\n msg=None,\n lineno=e.start_line,\n col_offset=e.start_column)\n\n @builds(\"lambda\")\n @checkargs(min=2)\n def compile_lambda_expression(self, expr):\n expr.pop(0)\n sig = expr.pop(0)\n body = expr.pop(0)\n # assert expr is empty\n return ast.Lambda(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n args=ast.arguments(args=[\n ast.Name(arg=ast_str(x), id=ast_str(x),\n ctx=ast.Param(),\n lineno=x.start_line,\n col_offset=x.start_column)\n for x in sig],\n vararg=None,\n kwarg=None,\n defaults=[],\n kwonlyargs=[],\n kw_defaults=[]),\n body=self.compile(body))\n\n @builds(\"pass\")\n @checkargs(0)\n def compile_pass_expression(self, expr):\n return ast.Pass(lineno=expr.start_line, col_offset=expr.start_column)\n\n @builds(\"yield\")\n @checkargs(1)\n def compile_yield_expression(self, expr):\n expr.pop(0)\n return ast.Yield(\n value=self.compile(expr.pop(0)),\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n @builds(\"import\")\n def compile_import_expression(self, expr):\n expr.pop(0) # index\n return ast.Import(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n names=[ast.alias(name=ast_str(x), asname=None) for x in expr])\n\n @builds(\"import_as\")\n def compile_import_as_expression(self, expr):\n expr.pop(0) # index\n modlist = [expr[i:i + 2] for i in range(0, len(expr), 2)]\n return ast.Import(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n module=ast_str(expr.pop(0)),\n names=[ast.alias(name=ast_str(x[0]),\n asname=ast_str(x[1])) for x in modlist])\n\n @builds(\"import_from\")\n @checkargs(min=1)\n def compile_import_from_expression(self, expr):\n expr.pop(0) # index\n return ast.ImportFrom(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n module=ast_str(expr.pop(0)),\n names=[ast.alias(name=ast_str(x), asname=None) for x in 
expr],\n level=0)\n\n @builds(\"get\")\n @checkargs(2)\n def compile_index_expression(self, expr):\n expr.pop(0) # index\n val = self.compile(expr.pop(0)) # target\n sli = self.compile(expr.pop(0)) # slice\n\n return ast.Subscript(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n value=val,\n slice=ast.Index(value=sli),\n ctx=ast.Load())\n\n @builds(\"slice\")\n @checkargs(min=1, max=3)\n def compile_slice_expression(self, expr):\n expr.pop(0) # index\n val = self.compile(expr.pop(0)) # target\n\n low = None\n if expr != []:\n low = self.compile(expr.pop(0))\n\n high = None\n if expr != []:\n high = self.compile(expr.pop(0))\n\n return ast.Subscript(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n value=val,\n slice=ast.Slice(lower=low,\n upper=high,\n step=None),\n ctx=ast.Load())\n\n @builds(\"assoc\")\n @checkargs(3)\n def compile_assoc_expression(self, expr):\n expr.pop(0) # assoc\n # (assoc foo bar baz) => foo[bar] = baz\n target = expr.pop(0)\n key = expr.pop(0)\n val = expr.pop(0)\n\n return ast.Assign(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n targets=[\n ast.Subscript(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n value=self.compile(target),\n slice=ast.Index(value=self.compile(key)),\n ctx=ast.Store())],\n value=self.compile(val))\n\n @builds(\"decorate_with\")\n @checkargs(min=1)\n def compile_decorate_expression(self, expr):\n expr.pop(0) # decorate-with\n fn = self.compile(expr.pop(-1))\n if type(fn) != ast.FunctionDef:\n raise TypeError(\"Decorated a non-function\")\n fn.decorator_list = [self.compile(x) for x in expr]\n return fn\n\n @builds(\"with\")\n @checkargs(min=2)\n def compile_with_expression(self, expr):\n expr.pop(0) # with\n\n args = expr.pop(0)\n if len(args) > 2 or len(args) < 1:\n raise TypeError(\"with needs [arg (expr)] or [(expr)]\")\n\n args.reverse()\n ctx = self.compile(args.pop(0))\n\n thing = None\n if args != []:\n thing = self._storeize(self.compile(args.pop(0)))\n\n ret = ast.With(context_expr=ctx,\n lineno=expr.start_line,\n col_offset=expr.start_column,\n optional_vars=thing,\n body=self._code_branch(\n [self.compile(x) for x in expr],\n expr.start_line,\n expr.start_column))\n\n if sys.version_info[0] >= 3 and sys.version_info[1] >= 3:\n ret.items = [ast.withitem(context_expr=ctx, optional_vars=thing)]\n\n return ret\n\n @builds(\",\")\n def compile_tuple(self, expr):\n expr.pop(0)\n return ast.Tuple(elts=[self.compile(x) for x in expr],\n lineno=expr.start_line,\n col_offset=expr.start_column,\n ctx=ast.Load())\n\n @builds(\"list_comp\")\n @checkargs(min=2, max=3)\n def compile_list_comprehension(self, expr):\n # (list-comp expr (target iter) cond?)\n expr.pop(0)\n expression = expr.pop(0)\n tar_it = iter(expr.pop(0))\n targets = zip(tar_it, tar_it)\n\n cond = self.compile(expr.pop(0)) if expr != [] else None\n\n ret = ast.ListComp(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n elt=self.compile(expression),\n generators=[])\n\n for target, iterable in targets:\n ret.generators.append(ast.comprehension(\n target=self._storeize(self.compile(target)),\n iter=self.compile(iterable),\n ifs=[]))\n\n if cond:\n ret.generators[-1].ifs.append(cond)\n\n return ret\n\n def _storeize(self, name):\n if isinstance(name, ast.Tuple):\n for x in name.elts:\n x.ctx = ast.Store()\n name.ctx = ast.Store()\n return name\n\n @builds(\"kwapply\")\n @checkargs(2)\n def compile_kwapply_expression(self, expr):\n expr.pop(0) # kwapply\n call = self.compile(expr.pop(0))\n kwargs = expr.pop(0)\n\n if 
type(call) != ast.Call:\n raise TypeError(\"kwapplying a non-call\")\n\n call.keywords = [ast.keyword(arg=ast_str(x),\n value=self.compile(kwargs[x])) for x in kwargs]\n\n return call\n\n @builds(\"not\")\n @builds(\"~\")\n @checkargs(1)\n def compile_unary_operator(self, expression):\n ops = {\"not\": ast.Not,\n \"~\": ast.Invert}\n operator = expression.pop(0)\n operand = expression.pop(0)\n return ast.UnaryOp(op=ops[operator](),\n operand=self.compile(operand),\n lineno=operator.start_line,\n col_offset=operator.start_column)\n\n @builds(\"and\")\n @builds(\"or\")\n @checkargs(min=2)\n def compile_logical_or_and_and_operator(self, expression):\n ops = {\"and\": ast.And,\n \"or\": ast.Or}\n operator = expression.pop(0)\n values = []\n for child in expression:\n values.append(self.compile(child))\n return ast.BoolOp(op=ops[operator](),\n lineno=operator.start_line,\n col_offset=operator.start_column,\n values=values)\n\n @builds(\"=\")\n @builds(\"!=\")\n @builds(\"<\")\n @builds(\"<=\")\n @builds(\">\")\n @builds(\">=\")\n @builds(\"is\")\n @builds(\"in\")\n @builds(\"is_not\")\n @builds(\"not_in\")\n @checkargs(min=2)\n def compile_compare_op_expression(self, expression):\n ops = {\"=\": ast.Eq, \"!=\": ast.NotEq,\n \"<\": ast.Lt, \"<=\": ast.LtE,\n \">\": ast.Gt, \">=\": ast.GtE,\n \"is\": ast.Is, \"is_not\": ast.IsNot,\n \"in\": ast.In, \"not_in\": ast.NotIn}\n\n inv = expression.pop(0)\n op = ops[inv]\n ops = [op() for x in range(1, len(expression))]\n e = expression.pop(0)\n\n return ast.Compare(left=self.compile(e),\n ops=ops,\n comparators=[self.compile(x) for x in expression],\n lineno=e.start_line,\n col_offset=e.start_column)\n\n @builds(\"+\")\n @builds(\"%\")\n @builds(\"-\")\n @builds(\"/\")\n @builds(\"*\")\n @checkargs(min=2)\n def compile_maths_expression(self, expression):\n # operator = Mod | Pow | LShift | RShift | BitOr |\n # BitXor | BitAnd | FloorDiv\n # (to implement list) XXX\n\n ops = {\"+\": ast.Add,\n \"/\": ast.Div,\n \"*\": ast.Mult,\n \"-\": ast.Sub,\n \"%\": ast.Mod}\n\n inv = expression.pop(0)\n op = ops[inv]\n\n left = self.compile(expression.pop(0))\n calc = None\n for child in expression:\n calc = ast.BinOp(left=left,\n op=op(),\n right=self.compile(child),\n lineno=child.start_line,\n col_offset=child.start_column)\n left = calc\n return calc\n\n def compile_dotted_expression(self, expr):\n ofn = expr.pop(0) # .join\n\n fn = HySymbol(ofn[1:])\n fn.replace(ofn)\n\n obj = expr.pop(0) # [1 2 3 4]\n\n return ast.Call(\n func=ast.Attribute(\n lineno=expr.start_line,\n col_offset=expr.start_column,\n value=self.compile(obj),\n attr=ast_str(fn),\n ctx=ast.Load()),\n args=[self.compile(x) for x in expr],\n keywords=[],\n lineno=expr.start_line,\n col_offset=expr.start_column,\n starargs=None,\n kwargs=None)\n\n @builds(HyExpression)\n def compile_expression(self, expression):\n fn = expression[0]\n if isinstance(fn, HyString):\n if fn in _compile_table:\n return _compile_table[fn](self, expression)\n\n if expression[0].startswith(\".\"):\n return self.compile_dotted_expression(expression)\n\n return ast.Call(func=self.compile(fn),\n args=[self.compile(x) for x in expression[1:]],\n keywords=[],\n starargs=None,\n kwargs=None,\n lineno=expression.start_line,\n col_offset=expression.start_column)\n\n @builds(\"def\")\n @builds(\"setf\")\n @builds(\"setv\")\n @checkargs(2)\n def compile_def_expression(self, expression):\n expression.pop(0) # \"def\"\n name = expression.pop(0)\n\n what = self.compile(expression.pop(0))\n\n if type(what) == ast.FunctionDef:\n # We 
special case a FunctionDef, since we can define by setting\n # FunctionDef's .name attribute, rather then foo == anon_fn. This\n # helps keep things clean.\n what.name = ast_str(name)\n return what\n\n name = self._storeize(self.compile(name))\n\n return ast.Assign(\n lineno=expression.start_line,\n col_offset=expression.start_column,\n targets=[name], value=what)\n\n @builds(\"foreach\")\n @checkargs(min=1)\n def compile_for_expression(self, expression):\n ret_status = self.returnable\n self.returnable = False\n\n expression.pop(0) # for\n name, iterable = expression.pop(0)\n target = self._storeize(self.compile_symbol(name))\n\n ret = ast.For(lineno=expression.start_line,\n col_offset=expression.start_column,\n target=target,\n iter=self.compile(iterable),\n body=self._code_branch(\n [self.compile(x) for x in expression],\n expression.start_line,\n expression.start_column),\n orelse=[])\n\n self.returnable = ret_status\n return ret\n\n @builds(\"while\")\n @checkargs(min=2)\n def compile_while_expression(self, expr):\n expr.pop(0) # \"while\"\n test = self.compile(expr.pop(0))\n\n return ast.While(test=test,\n body=self._code_branch(\n [self.compile(x) for x in expr],\n expr.start_line,\n expr.start_column),\n orelse=[],\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n @builds(HyList)\n def compile_list(self, expr):\n return ast.List(\n elts=[self.compile(x) for x in expr],\n ctx=ast.Load(),\n lineno=expr.start_line,\n col_offset=expr.start_column)\n\n @builds(\"fn\")\n @checkargs(min=2)\n def compile_fn_expression(self, expression):\n expression.pop(0) # fn\n\n ret_status = self.returnable\n\n self.anon_fn_count += 1\n name = \"_hy_anon_fn_%d\" % (self.anon_fn_count)\n sig = expression.pop(0)\n\n body = []\n if expression != []:\n self.returnable = True\n tailop = self.compile(expression.pop(-1))\n self.returnable = False\n for el in expression:\n body.append(self.compile(el))\n body.append(tailop)\n\n self.returnable = True\n body = self._code_branch(body,\n expression.start_line,\n expression.start_column)\n\n ret = ast.FunctionDef(\n name=name,\n lineno=expression.start_line,\n col_offset=expression.start_column,\n args=ast.arguments(\n args=[\n ast.Name(\n arg=ast_str(x), id=ast_str(x),\n ctx=ast.Param(),\n lineno=x.start_line,\n col_offset=x.start_column)\n for x in sig],\n vararg=None,\n kwarg=None,\n kwonlyargs=[],\n kw_defaults=[],\n defaults=[]),\n body=body,\n decorator_list=[])\n\n self.returnable = ret_status\n return ret\n\n @builds(HyInteger)\n def compile_number(self, number):\n return ast.Num(n=int(number), # See HyInteger above.\n lineno=number.start_line,\n col_offset=number.start_column)\n\n @builds(HySymbol)\n def compile_symbol(self, symbol):\n if \".\" in symbol:\n glob, local = symbol.rsplit(\".\", 1)\n glob = HySymbol(glob)\n glob.replace(symbol)\n\n return ast.Attribute(\n lineno=symbol.start_line,\n col_offset=symbol.start_column,\n value=self.compile_symbol(glob),\n attr=ast_str(local),\n ctx=ast.Load()\n )\n\n return ast.Name(id=ast_str(symbol),\n arg=ast_str(symbol),\n ctx=ast.Load(),\n lineno=symbol.start_line,\n col_offset=symbol.start_column)\n\n @builds(HyString)\n def compile_string(self, string):\n return ast.Str(s=ast_str(string), lineno=string.start_line,\n col_offset=string.start_column)\n\n @builds(HyDict)\n def compile_dict(self, m):\n keys = []\n vals = []\n for entry in m:\n keys.append(self.compile(entry))\n vals.append(self.compile(m[entry]))\n\n return ast.Dict(\n lineno=m.start_line,\n col_offset=m.start_column,\n keys=keys,\n 
values=vals)\n\n\ndef hy_compile(tree, root=None):\n \" Compile a HyObject tree into a Python AST tree. \"\n compiler = HyASTCompiler()\n tlo = root\n if root is None:\n tlo = ast.Module\n ret = tlo(body=compiler._mangle_branch(compiler.compile(tree), 0, 0))\n return ret\n", "path": "hy/compiler.py" } ]
diff --git a/hy/compiler.py b/hy/compiler.py index 76b266a30..38566166d 100644 --- a/hy/compiler.py +++ b/hy/compiler.py @@ -167,10 +167,10 @@ def compile_do_expression(self, expr): @builds("throw") @builds("raise") - @checkargs(min=1) + @checkargs(max=1) def compile_throw_expression(self, expr): expr.pop(0) - exc = self.compile(expr.pop(0)) + exc = self.compile(expr.pop(0)) if expr else None return ast.Raise( lineno=expr.start_line, col_offset=expr.start_column, diff --git a/tests/compilers/test_ast.py b/tests/compilers/test_ast.py index f839feb0b..c10b822e6 100644 --- a/tests/compilers/test_ast.py +++ b/tests/compilers/test_ast.py @@ -94,22 +94,24 @@ def test_ast_good_do(): def test_ast_good_throw(): "Make sure AST can compile valid throw" + hy_compile(tokenize("(throw)")) hy_compile(tokenize("(throw 1)")) def test_ast_bad_throw(): "Make sure AST can't compile invalid throw" - cant_compile("(throw)") + cant_compile("(raise 1 2 3)") def test_ast_good_raise(): "Make sure AST can compile valid raise" + hy_compile(tokenize("(raise)")) hy_compile(tokenize("(raise 1)")) def test_ast_bad_raise(): "Make sure AST can't compile invalid raise" - cant_compile("(raise)") + cant_compile("(raise 1 2 3)") def test_ast_good_try(): diff --git a/tests/native_tests/language.hy b/tests/native_tests/language.hy index ef2de957f..a47a525c9 100644 --- a/tests/native_tests/language.hy +++ b/tests/native_tests/language.hy @@ -178,6 +178,26 @@ (try (pass) (except [IOError]) (except)) + ;; Test correct (raise) + (let [[passed false]] + (try + (try + (raise IndexError) + (except [IndexError] (raise))) + (except [IndexError] + (setv passed true))) + (assert passed)) + + ;; Test incorrect (raise) + (let [[passed false]] + (try + (raise) + ;; Python 2 raises TypeError + ;; Python 3 raises RuntimeError + (except [[TypeError RuntimeError]] + (setv passed true))) + (assert passed)) + (try (raise (KeyError)) (catch [[IOError]] (assert false))
Allow (raise)

A bare (raise) is actually valid in Python; it re-raises the last caught exception.
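For reference, a pure-Python sketch (not part of the PR) of the semantics that a bare (raise) compiles down to:

    def reraise_demo():
        try:
            raise IndexError("original")
        except IndexError:
            raise  # re-raises the active IndexError unchanged

    try:
        reraise_demo()
    except IndexError as e:
        print("re-raised:", e)  # -> re-raised: original

    # With no active exception, a bare raise itself fails:
    # Python 2 raises TypeError, Python 3 raises RuntimeError,
    # which is what the (except [[TypeError RuntimeError]] ...) test checks.
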
zulip__zulip-14678
[ { "content": "# System documented in https://zulip.readthedocs.io/en/latest/subsystems/logging.html\n\nfrom django.utils.timezone import now as timezone_now\nfrom django.utils.timezone import utc as timezone_utc\n\nimport hashlib\nimport logging\nimport threading\nimport traceback\nfrom typing import Optional, Tuple\nfrom datetime import datetime, timedelta\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom logging import Logger\n\nclass _RateLimitFilter:\n \"\"\"This class is designed to rate-limit Django error reporting\n notifications so that it won't send thousands of emails if the\n database or cache is completely down. It uses a remote shared\n cache (shared by all Django processes) for its default behavior\n (so that the deduplication is global, not per-process), and a\n local in-process cache for when it can't access the remote cache.\n\n This is critical code because it is called every time\n `logging.error` or `logging.exception` (or an exception) happens\n in the codebase.\n\n Adapted from https://djangosnippets.org/snippets/2242/.\n\n \"\"\"\n last_error = datetime.min.replace(tzinfo=timezone_utc)\n # This thread-local variable is used to detect recursive\n # exceptions during exception handling (primarily intended for\n # when accessing the shared cache throws an exception).\n handling_exception = threading.local()\n should_reset_handling_exception = False\n\n def can_use_remote_cache(self) -> Tuple[bool, bool]:\n if getattr(self.handling_exception, 'value', False):\n # If we're processing an exception that occurred\n # while handling an exception, this almost\n # certainly was because interacting with the\n # remote cache is failing (e.g. because the cache\n # is down). Fall back to tracking duplicate\n # exceptions in memory without the remote shared cache.\n return False, False\n\n # Now we test if the remote cache is accessible.\n #\n # This code path can only be reached if we are not potentially\n # handling a recursive exception, so here we set\n # self.handling_exception (in case the cache access we're\n # about to do triggers a `logging.error` or exception that\n # might recurse into this filter class), and actually record\n # that this is the main exception handler thread.\n try:\n self.handling_exception.value = True\n cache.set('RLF_TEST_KEY', 1, 1)\n return cache.get('RLF_TEST_KEY') == 1, True\n except Exception:\n return False, True\n\n def filter(self, record: logging.LogRecord) -> bool:\n # When the original filter() call finishes executing, it's\n # going to change handling_exception.value to False. 
The\n # local variable below tracks whether the *current*,\n # potentially recursive, filter() call is allowed to touch\n # that value (only the original will find this to be True\n # at the end of its execution)\n should_reset_handling_exception = False\n try:\n # Track duplicate errors\n duplicate = False\n rate = getattr(settings, '%s_LIMIT' % (self.__class__.__name__.upper(),),\n 600) # seconds\n\n if rate > 0:\n (use_cache, should_reset_handling_exception) = self.can_use_remote_cache()\n if use_cache:\n if record.exc_info is not None:\n tb = '\\n'.join(traceback.format_exception(*record.exc_info))\n else:\n tb = str(record)\n key = self.__class__.__name__.upper() + hashlib.sha1(tb.encode()).hexdigest()\n duplicate = cache.get(key) == 1\n if not duplicate:\n cache.set(key, 1, rate)\n else:\n min_date = timezone_now() - timedelta(seconds=rate)\n duplicate = (self.last_error >= min_date)\n if not duplicate:\n self.last_error = timezone_now()\n\n return not duplicate\n finally:\n if should_reset_handling_exception:\n self.handling_exception.value = False\n\nclass ZulipLimiter(_RateLimitFilter):\n pass\n\nclass EmailLimiter(_RateLimitFilter):\n pass\n\nclass ReturnTrue(logging.Filter):\n def filter(self, record: logging.LogRecord) -> bool:\n return True\n\nclass ReturnEnabled(logging.Filter):\n def filter(self, record: logging.LogRecord) -> bool:\n return settings.LOGGING_ENABLED\n\nclass RequireReallyDeployed(logging.Filter):\n def filter(self, record: logging.LogRecord) -> bool:\n from django.conf import settings\n return settings.PRODUCTION\n\ndef skip_200_and_304(record: logging.LogRecord) -> bool:\n # Apparently, `status_code` is added by Django and is not an actual\n # attribute of LogRecord; as a result, mypy throws an error if we\n # access the `status_code` attribute directly.\n if getattr(record, 'status_code') in [200, 304]:\n return False\n\n return True\n\ndef skip_site_packages_logs(record: logging.LogRecord) -> bool:\n # This skips the log records that are generated from libraries\n # installed in site packages.\n # Workaround for https://code.djangoproject.com/ticket/26886\n if 'site-packages' in record.pathname:\n return False\n return True\n\ndef find_log_caller_module(record: logging.LogRecord) -> Optional[str]:\n '''Find the module name corresponding to where this record was logged.\n\n Sadly `record.module` is just the innermost component of the full\n module name, so we have to go reconstruct this ourselves.\n '''\n # Repeat a search similar to that in logging.Logger.findCaller.\n # The logging call should still be on the stack somewhere; search until\n # we find something in the same source file, and that should give the\n # right module name.\n f = logging.currentframe()\n while True:\n if f.f_code.co_filename == record.pathname:\n return f.f_globals.get('__name__')\n if f.f_back is None:\n return None\n f = f.f_back\n\nlogger_nicknames = {\n 'root': '', # This one is more like undoing a nickname.\n 'zulip.requests': 'zr', # Super common.\n}\n\ndef find_log_origin(record: logging.LogRecord) -> str:\n logger_name = logger_nicknames.get(record.name, record.name)\n\n if settings.LOGGING_SHOW_MODULE:\n module_name = find_log_caller_module(record)\n if module_name == logger_name or module_name == record.name:\n # Abbreviate a bit.\n pass\n else:\n logger_name = '{}/{}'.format(logger_name, module_name or '?')\n\n if settings.RUNNING_INSIDE_TORNADO:\n # In multi-sharded Tornado, it's often valuable to have which shard is\n # responsible for the request in the logs.\n from 
zerver.tornado.ioloop_logging import logging_data\n shard = logging_data.get('port', 'unknown')\n logger_name = \"{}:{}\".format(logger_name, shard)\n\n return logger_name\n\nlog_level_abbrevs = {\n 'DEBUG': 'DEBG',\n 'INFO': 'INFO',\n 'WARNING': 'WARN',\n 'ERROR': 'ERR',\n 'CRITICAL': 'CRIT',\n}\n\ndef abbrev_log_levelname(levelname: str) -> str:\n # It's unlikely someone will set a custom log level with a custom name,\n # but it's an option, so we shouldn't crash if someone does.\n return log_level_abbrevs.get(levelname, levelname[:4])\n\nclass ZulipFormatter(logging.Formatter):\n # Used in the base implementation. Default uses `,`.\n default_msec_format = '%s.%03d'\n\n def __init__(self) -> None:\n super().__init__(fmt=self._compute_fmt())\n\n def _compute_fmt(self) -> str:\n pieces = ['%(asctime)s', '%(zulip_level_abbrev)-4s']\n if settings.LOGGING_SHOW_PID:\n pieces.append('pid:%(process)d')\n pieces.extend(['[%(zulip_origin)s]', '%(message)s'])\n return ' '.join(pieces)\n\n def format(self, record: logging.LogRecord) -> str:\n if not getattr(record, 'zulip_decorated', False):\n # The `setattr` calls put this logic explicitly outside the bounds of the\n # type system; otherwise mypy would complain LogRecord lacks these attributes.\n setattr(record, 'zulip_level_abbrev', abbrev_log_levelname(record.levelname))\n setattr(record, 'zulip_origin', find_log_origin(record))\n setattr(record, 'zulip_decorated', True)\n return super().format(record)\n\ndef log_to_file(logger: Logger,\n filename: str,\n log_format: str=\"%(asctime)s %(levelname)-8s %(message)s\",\n ) -> None:\n \"\"\"Note: `filename` should be declared in zproject/settings.py with zulip_path.\"\"\"\n formatter = logging.Formatter(log_format)\n handler = logging.FileHandler(filename)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n", "path": "zerver/lib/logging_util.py" } ]
[ { "content": "# System documented in https://zulip.readthedocs.io/en/latest/subsystems/logging.html\n\nfrom django.utils.timezone import now as timezone_now\nfrom django.utils.timezone import utc as timezone_utc\n\nimport hashlib\nimport logging\nimport threading\nimport traceback\nfrom typing import Optional, Tuple\nfrom datetime import datetime, timedelta\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom logging import Logger\n\nclass _RateLimitFilter:\n \"\"\"This class is designed to rate-limit Django error reporting\n notifications so that it won't send thousands of emails if the\n database or cache is completely down. It uses a remote shared\n cache (shared by all Django processes) for its default behavior\n (so that the deduplication is global, not per-process), and a\n local in-process cache for when it can't access the remote cache.\n\n This is critical code because it is called every time\n `logging.error` or `logging.exception` (or an exception) happens\n in the codebase.\n\n Adapted from https://djangosnippets.org/snippets/2242/.\n\n \"\"\"\n last_error = datetime.min.replace(tzinfo=timezone_utc)\n # This thread-local variable is used to detect recursive\n # exceptions during exception handling (primarily intended for\n # when accessing the shared cache throws an exception).\n handling_exception = threading.local()\n should_reset_handling_exception = False\n\n def can_use_remote_cache(self) -> Tuple[bool, bool]:\n if getattr(self.handling_exception, 'value', False):\n # If we're processing an exception that occurred\n # while handling an exception, this almost\n # certainly was because interacting with the\n # remote cache is failing (e.g. because the cache\n # is down). Fall back to tracking duplicate\n # exceptions in memory without the remote shared cache.\n return False, False\n\n # Now we test if the remote cache is accessible.\n #\n # This code path can only be reached if we are not potentially\n # handling a recursive exception, so here we set\n # self.handling_exception (in case the cache access we're\n # about to do triggers a `logging.error` or exception that\n # might recurse into this filter class), and actually record\n # that this is the main exception handler thread.\n try:\n self.handling_exception.value = True\n cache.set('RLF_TEST_KEY', 1, 1)\n return cache.get('RLF_TEST_KEY') == 1, True\n except Exception:\n return False, True\n\n def filter(self, record: logging.LogRecord) -> bool:\n # When the original filter() call finishes executing, it's\n # going to change handling_exception.value to False. 
The\n # local variable below tracks whether the *current*,\n # potentially recursive, filter() call is allowed to touch\n # that value (only the original will find this to be True\n # at the end of its execution)\n should_reset_handling_exception = False\n try:\n # Track duplicate errors\n duplicate = False\n rate = getattr(settings, '%s_LIMIT' % (self.__class__.__name__.upper(),),\n 600) # seconds\n\n if rate > 0:\n (use_cache, should_reset_handling_exception) = self.can_use_remote_cache()\n if use_cache:\n if record.exc_info is not None:\n tb = '\\n'.join(traceback.format_exception(*record.exc_info))\n else:\n tb = str(record)\n key = self.__class__.__name__.upper() + hashlib.sha1(tb.encode()).hexdigest()\n duplicate = cache.get(key) == 1\n if not duplicate:\n cache.set(key, 1, rate)\n else:\n min_date = timezone_now() - timedelta(seconds=rate)\n duplicate = (self.last_error >= min_date)\n if not duplicate:\n self.last_error = timezone_now()\n\n return not duplicate\n finally:\n if should_reset_handling_exception:\n self.handling_exception.value = False\n\nclass ZulipLimiter(_RateLimitFilter):\n pass\n\nclass EmailLimiter(_RateLimitFilter):\n pass\n\nclass ReturnTrue(logging.Filter):\n def filter(self, record: logging.LogRecord) -> bool:\n return True\n\nclass ReturnEnabled(logging.Filter):\n def filter(self, record: logging.LogRecord) -> bool:\n return settings.LOGGING_ENABLED\n\nclass RequireReallyDeployed(logging.Filter):\n def filter(self, record: logging.LogRecord) -> bool:\n from django.conf import settings\n return settings.PRODUCTION\n\ndef skip_200_and_304(record: logging.LogRecord) -> bool:\n # Apparently, `status_code` is added by Django and is not an actual\n # attribute of LogRecord; as a result, mypy throws an error if we\n # access the `status_code` attribute directly.\n if getattr(record, 'status_code', None) in [200, 304]:\n return False\n\n return True\n\ndef skip_site_packages_logs(record: logging.LogRecord) -> bool:\n # This skips the log records that are generated from libraries\n # installed in site packages.\n # Workaround for https://code.djangoproject.com/ticket/26886\n if 'site-packages' in record.pathname:\n return False\n return True\n\ndef find_log_caller_module(record: logging.LogRecord) -> Optional[str]:\n '''Find the module name corresponding to where this record was logged.\n\n Sadly `record.module` is just the innermost component of the full\n module name, so we have to go reconstruct this ourselves.\n '''\n # Repeat a search similar to that in logging.Logger.findCaller.\n # The logging call should still be on the stack somewhere; search until\n # we find something in the same source file, and that should give the\n # right module name.\n f = logging.currentframe()\n while f is not None:\n if f.f_code.co_filename == record.pathname:\n return f.f_globals.get('__name__')\n f = f.f_back\n return None # type: ignore # required because of previous ignore on f\n\nlogger_nicknames = {\n 'root': '', # This one is more like undoing a nickname.\n 'zulip.requests': 'zr', # Super common.\n}\n\ndef find_log_origin(record: logging.LogRecord) -> str:\n logger_name = logger_nicknames.get(record.name, record.name)\n\n if settings.LOGGING_SHOW_MODULE:\n module_name = find_log_caller_module(record)\n if module_name == logger_name or module_name == record.name:\n # Abbreviate a bit.\n pass\n else:\n logger_name = '{}/{}'.format(logger_name, module_name or '?')\n\n if settings.RUNNING_INSIDE_TORNADO:\n # In multi-sharded Tornado, it's often valuable to have which shard is\n 
# responsible for the request in the logs.\n from zerver.tornado.ioloop_logging import logging_data\n shard = logging_data.get('port', 'unknown')\n logger_name = \"{}:{}\".format(logger_name, shard)\n\n return logger_name\n\nlog_level_abbrevs = {\n 'DEBUG': 'DEBG',\n 'INFO': 'INFO',\n 'WARNING': 'WARN',\n 'ERROR': 'ERR',\n 'CRITICAL': 'CRIT',\n}\n\ndef abbrev_log_levelname(levelname: str) -> str:\n # It's unlikely someone will set a custom log level with a custom name,\n # but it's an option, so we shouldn't crash if someone does.\n return log_level_abbrevs.get(levelname, levelname[:4])\n\nclass ZulipFormatter(logging.Formatter):\n # Used in the base implementation. Default uses `,`.\n default_msec_format = '%s.%03d'\n\n def __init__(self) -> None:\n super().__init__(fmt=self._compute_fmt())\n\n def _compute_fmt(self) -> str:\n pieces = ['%(asctime)s', '%(zulip_level_abbrev)-4s']\n if settings.LOGGING_SHOW_PID:\n pieces.append('pid:%(process)d')\n pieces.extend(['[%(zulip_origin)s]', '%(message)s'])\n return ' '.join(pieces)\n\n def format(self, record: logging.LogRecord) -> str:\n if not getattr(record, 'zulip_decorated', False):\n # The `setattr` calls put this logic explicitly outside the bounds of the\n # type system; otherwise mypy would complain LogRecord lacks these attributes.\n setattr(record, 'zulip_level_abbrev', abbrev_log_levelname(record.levelname))\n setattr(record, 'zulip_origin', find_log_origin(record))\n setattr(record, 'zulip_decorated', True)\n return super().format(record)\n\ndef log_to_file(logger: Logger,\n filename: str,\n log_format: str=\"%(asctime)s %(levelname)-8s %(message)s\",\n ) -> None:\n \"\"\"Note: `filename` should be declared in zproject/settings.py with zulip_path.\"\"\"\n formatter = logging.Formatter(log_format)\n handler = logging.FileHandler(filename)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n", "path": "zerver/lib/logging_util.py" } ]
diff --git a/zerver/lib/logging_util.py b/zerver/lib/logging_util.py index 6f5bf023873ac..43c9030ebb10a 100644 --- a/zerver/lib/logging_util.py +++ b/zerver/lib/logging_util.py @@ -119,7 +119,7 @@ def skip_200_and_304(record: logging.LogRecord) -> bool: # Apparently, `status_code` is added by Django and is not an actual # attribute of LogRecord; as a result, mypy throws an error if we # access the `status_code` attribute directly. - if getattr(record, 'status_code') in [200, 304]: + if getattr(record, 'status_code', None) in [200, 304]: return False return True
AttributeError: 'LogRecord' object has no attribute 'status_code' I have a development environment with the latest Git version. After performing many requests, I get blocked because of rate limiting. Then, the following error is logged in the console: ``` ---------------------------------------- Exception happened during processing of request from ('127.0.0.1', 56444) 2020-04-16 11:35:49.159 INFO [zr] 127.0.0.1 POST 429 65ms (mem: 57ms/4) (+start: 24ms) /json/messages (10@zulip via website) 2020-04-16 11:35:49.160 INFO [zr] status=429, data=b'{"result":"error","msg":"API usage exceeded rate limit","retry-after":2.6131470203}\n', uid=10@zulip 2020-04-16 11:35:49.162 INFO [zr] 127.0.0.1 POST 429 11ms (mem: 7ms/2) /json/messages (10@zulip via website) 2020-04-16 11:35:49.162 WARN [django.server] "POST /json/messages HTTP/1.1" 429 84 2020-04-16 11:35:49.173 INFO [zr] status=429, data=b'{"result":"error","msg":"API usage exceeded rate limit","retry-after":2.6109778881}\n', uid=10@zulip 2020-04-16 11:35:49.179 INFO [zr] 127.0.0.1 POST 429 20ms (+start: 51ms) /json/messages (10@zulip via website) 2020-04-16 11:35:49.182 WARN [django.server] "POST /json/messages HTTP/1.1" 429 84 2020-04-16 11:35:49.195 INFO [zr] status=429, data=b'{"result":"error","msg":"API usage exceeded rate limit","retry-after":2.5940015316}\n', uid=10@zulip Traceback (most recent call last): File "/usr/lib/python3.7/socketserver.py", line 650, in process_request_thread self.finish_request(request, client_address) File "/usr/lib/python3.7/socketserver.py", line 360, in finish_request self.RequestHandlerClass(request, client_address, self) File "/usr/lib/python3.7/socketserver.py", line 720, in __init__ self.handle() File "/srv/zulip-py3-venv/lib/python3.7/site-packages/django/core/servers/basehttp.py", line 171, in handle self.handle_one_request() File "/srv/zulip-py3-venv/lib/python3.7/site-packages/django/core/servers/basehttp.py", line 187, in handle_one_request if not self.parse_request(): # An error code has been sent, just exit File "/usr/lib/python3.7/http/server.py", line 322, in parse_request "Bad request syntax (%r)" % requestline) File "/usr/lib/python3.7/http/server.py", line 456, in send_error self.log_error("code %d, message %s", code, message) File "/usr/lib/python3.7/http/server.py", line 558, in log_error self.log_message(format, *args) File "/srv/zulip-py3-venv/lib/python3.7/site-packages/django/core/servers/basehttp.py", line 154, in log_message level(format, *args, extra=extra) File "/usr/lib/python3.7/logging/__init__.py", line 1383, in info self._log(INFO, msg, args, **kwargs) File "/usr/lib/python3.7/logging/__init__.py", line 1519, in _log self.handle(record) File "/usr/lib/python3.7/logging/__init__.py", line 1528, in handle if (not self.disabled) and self.filter(record): File "/usr/lib/python3.7/logging/__init__.py", line 762, in filter result = f.filter(record) File "/srv/zulip-py3-venv/lib/python3.7/site-packages/django/utils/log.py", line 147, in filter if self.callback(record): File "/home/sjoerd/zulip/zerver/lib/logging_util.py", line 122, in skip_200_and_304 if getattr(record, 'status_code') in [200, 304]: AttributeError: 'LogRecord' object has no attribute 'status_code' ---------------------------------------- ``` Normally, [http.server logs request, status code, size](https://github.com/python/cpython/blob/master/Lib/http/server.py#L544-L545), and [Django extracts the status code from that](https://github.com/django/django/blob/master/django/core/servers/basehttp.py#L144-L157). 
However, [on errors http.server logs only the code and message](https://github.com/python/cpython/blob/master/Lib/http/server.py#L457), and Django doesn't extract the status code from that form. Parsing arguments to log messages seems pretty fragile to me, so maybe it's better to accept that there isn't always a status code on a log record. Making `getattr` default to `None` in [`skip_200_and_304`](https://github.com/zulip/zulip/blob/master/zerver/lib/logging_util.py#L122) is probably the best option.
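A minimal standalone sketch (not Zulip or Django code) of why the one-argument `getattr` crashes on these error-path records, and why the defaulted form from the diff above does not; the record fields mimic what `http.server`'s `log_error` produces:

```
import logging

# An error-path record: Django's handler never attached a status_code here.
record = logging.LogRecord(
    name="django.server", level=logging.INFO, pathname=__file__, lineno=0,
    msg="code %d, message %s", args=(400, "Bad request syntax"),
    exc_info=None)

# getattr(record, 'status_code')      # raises AttributeError
assert getattr(record, 'status_code', None) is None

# With the default, the filter simply keeps the record instead of crashing:
assert getattr(record, 'status_code', None) not in [200, 304]
```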
mkdocs__mkdocs-2800
[ { "content": "import jinja2\n\nfrom mkdocs.utils import normalize_url\n\n\[email protected]\ndef url_filter(context, value):\n \"\"\" A Template filter to normalize URLs. \"\"\"\n return normalize_url(value, page=context['page'], base=context['base_url'])\n", "path": "mkdocs/utils/filters.py" } ]
[ { "content": "try:\n from jinja2 import pass_context as contextfilter\nexcept ImportError:\n from jinja2 import contextfilter\n\nfrom mkdocs.utils import normalize_url\n\n\n@contextfilter\ndef url_filter(context, value):\n \"\"\" A Template filter to normalize URLs. \"\"\"\n return normalize_url(value, page=context['page'], base=context['base_url'])\n", "path": "mkdocs/utils/filters.py" } ]
diff --git a/mkdocs/utils/filters.py b/mkdocs/utils/filters.py index abf715f9ac..c3049db9aa 100644 --- a/mkdocs/utils/filters.py +++ b/mkdocs/utils/filters.py @@ -1,9 +1,12 @@ -import jinja2 +try: + from jinja2 import pass_context as contextfilter +except ImportError: + from jinja2 import contextfilter from mkdocs.utils import normalize_url [email protected] +@contextfilter def url_filter(context, value): """ A Template filter to normalize URLs. """ return normalize_url(value, page=context['page'], base=context['base_url'])
jinja2 3.1.0 breaks mkdocs since the jinja2 3.1.0 release mkdocs does not work any more: ``` admin@host ui % pip install jinja2==3.1.0 Collecting jinja2==3.1.0 Using cached Jinja2-3.1.0-py3-none-any.whl (132 kB) Installing collected packages: jinja2 Attempting uninstall: jinja2 Found existing installation: Jinja2 3.0.0 Uninstalling Jinja2-3.0.0: Successfully uninstalled Jinja2-3.0.0 Successfully installed jinja2-3.1.0 admin@host ui % mkdocs build Traceback (most recent call last): File "/usr/local/bin/mkdocs", line 8, in <module> sys.exit(cli()) File "/usr/local/lib/python3.9/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python3.9/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.9/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/usr/local/lib/python3.9/site-packages/mkdocs/__main__.py", line 187, in build_command build.build(config.load_config(**kwargs), dirty=not clean) File "/usr/local/lib/python3.9/site-packages/mkdocs/config/base.py", line 216, in load_config from mkdocs.config.defaults import get_schema File "/usr/local/lib/python3.9/site-packages/mkdocs/config/defaults.py", line 1, in <module> from mkdocs.config import config_options File "/usr/local/lib/python3.9/site-packages/mkdocs/config/config_options.py", line 8, in <module> from mkdocs import utils, theme, plugins File "/usr/local/lib/python3.9/site-packages/mkdocs/theme.py", line 6, in <module> from mkdocs.utils import filters File "/usr/local/lib/python3.9/site-packages/mkdocs/utils/filters.py", line 13, in <module> @jinja2.contextfilter AttributeError: module 'jinja2' has no attribute 'contextfilter' ``` However, if I install jinja2 3.0.0: ``` admin@host ui % pip install jinja2==3.0.0 Collecting jinja2==3.0.0 Using cached Jinja2-3.0.0-py3-none-any.whl (133 kB) Requirement already satisfied: MarkupSafe>=2.0.0rc2 in /usr/local/lib/python3.9/site-packages (from jinja2==3.0.0) (2.1.1) Installing collected packages: jinja2 Attempting uninstall: jinja2 Found existing installation: Jinja2 3.1.0 Uninstalling Jinja2-3.1.0: Successfully uninstalled Jinja2-3.1.0 Successfully installed jinja2-3.0.0 admin@host ui % mkdocs build INFO - Cleaning site directory INFO - Building documentation to directory: /Users/admin/git/searchlab/ui/site INFO - Documentation built in 0.33 seconds ``` - mkdocs can be patched by explicitly installing jinja2 3.0.0. - maybe this is not a mkdocs bug, but a jinja2 bug; however, this could be patched in mkdocs as well. Prevent error with Jinja2 v3.1 Fixes #2794 See [Jinja2 v3.0.0 changes](https://jinja.palletsprojects.com/en/3.0.x/changes/#version-3-0-0): > The function and filter decorators have been renamed and unified. The old names are deprecated... > > - `pass_context` replaces `contextfunction` and `contextfilter`.
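The fix follows a common compatibility pattern: import the new Jinja2 >= 3.0 name first and fall back to the pre-3.1 name. A minimal sketch under that assumption; `shout_filter` is a hypothetical filter used only to illustrate the decorator:

```
# Jinja2 >= 3.0 provides pass_context; contextfilter existed up to 3.0
# (deprecated) and was removed in 3.1, so try the new name first.
try:
    from jinja2 import pass_context as contextfilter
except ImportError:
    from jinja2 import contextfilter

from jinja2 import Environment


@contextfilter
def shout_filter(context, value):
    # The render context is passed as the first argument.
    return str(value).upper()


env = Environment()
env.filters['shout'] = shout_filter
print(env.from_string("{{ 'hi' | shout }}").render())  # HI
```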
cloud-custodian__cloud-custodian-4194
[ { "content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom concurrent.futures import as_completed\nfrom datetime import datetime, timedelta\n\nfrom c7n.actions import BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import Filter, MetricsFilter\nfrom c7n.filters.iamaccess import CrossAccountAccessFilter\nfrom c7n.query import QueryResourceManager, ChildResourceManager\nfrom c7n.manager import resources\nfrom c7n.resolver import ValuesFrom\nfrom c7n.tags import universal_augment, register_universal_tags\nfrom c7n.utils import type_schema, local_session, chunks, get_retry\n\n\[email protected]('alarm')\nclass Alarm(QueryResourceManager):\n\n class resource_type(object):\n service = 'cloudwatch'\n type = 'alarm'\n enum_spec = ('describe_alarms', 'MetricAlarms', None)\n id = 'AlarmArn'\n filter_name = 'AlarmNames'\n filter_type = 'list'\n name = 'AlarmName'\n date = 'AlarmConfigurationUpdatedTimestamp'\n dimension = None\n config_type = 'AWS::CloudWatch::Alarm'\n\n retry = staticmethod(get_retry(('Throttled',)))\n\n\[email protected]_registry.register('delete')\nclass AlarmDelete(BaseAction):\n \"\"\"Delete a cloudwatch alarm.\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-alarms\n resource: alarm\n filters:\n - type: value\n value_type: age\n key: StateUpdatedTimestamp\n value: 30\n op: ge\n - StateValue: INSUFFICIENT_DATA\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudwatch:DeleteAlarms',)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('cloudwatch')\n\n for resource_set in chunks(resources, size=100):\n self.manager.retry(\n client.delete_alarms,\n AlarmNames=[r['AlarmName'] for r in resource_set])\n\n\[email protected]('event-rule')\nclass EventRule(QueryResourceManager):\n\n class resource_type(object):\n service = 'events'\n type = 'event-rule'\n enum_spec = ('list_rules', 'Rules', None)\n name = \"Name\"\n id = \"Name\"\n filter_name = \"NamePrefix\"\n filter_type = \"scalar\"\n dimension = None\n\n\[email protected]_registry.register('metrics')\nclass EventRuleMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n return [{'Name': 'RuleName', 'Value': resource['Name']}]\n\n\[email protected]('event-rule-target')\nclass EventRuleTarget(ChildResourceManager):\n\n class resource_type(object):\n service = 'events'\n type = 'event-rule-target'\n enum_spec = ('list_targets_by_rule', 'Targets', None)\n parent_spec = ('event-rule', 'Rule', True)\n name = id = 'Id'\n dimension = None\n filter_type = filter_name = None\n\n\[email protected]_registry.register('cross-account')\nclass CrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n # dummy permission\n permissions = ('events:ListTargetsByRule',)\n\n def __call__(self, r):\n account_id = r['Arn'].split(':', 5)[4]\n return account_id not in self.accounts\n\n\[email protected]_registry.register('delete')\nclass DeleteTarget(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('events:RemoveTargets',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('events')\n rule_targets = {}\n for r in resources:\n rule_targets.setdefault(r['c7n:parent-id'], []).append(r['Id'])\n\n for rule_id, target_ids in rule_targets.items():\n client.remove_targets(\n Ids=target_ids,\n Rule=rule_id)\n\n\[email protected]('log-group')\nclass LogGroup(QueryResourceManager):\n\n class resource_type(object):\n service = 'logs'\n type = 'log-group'\n enum_spec = ('describe_log_groups', 'logGroups', None)\n name = 'logGroupName'\n id = 'arn'\n filter_name = 'logGroupNamePrefix'\n filter_type = 'scalar'\n dimension = 'LogGroupName'\n date = 'creationTime'\n\n augment = universal_augment\n\n def get_arns(self, resources):\n # log group arn in resource describe has ':*' suffix, not all\n # apis can use that form, so normalize to standard arn.\n return [r['arn'][:-2] for r in resources]\n\n\nregister_universal_tags(LogGroup.filter_registry, LogGroup.action_registry)\n\n\[email protected]_registry.register('retention')\nclass Retention(BaseAction):\n \"\"\"Action to set the retention period (in days) for CloudWatch log groups\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudwatch-set-log-group-retention\n resource: log-group\n actions:\n - type: retention\n days: 200\n \"\"\"\n\n schema = type_schema('retention', days={'type': 'integer'})\n permissions = ('logs:PutRetentionPolicy',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n days = self.data['days']\n for r in resources:\n client.put_retention_policy(\n logGroupName=r['logGroupName'],\n retentionInDays=days)\n\n\[email protected]_registry.register('delete')\nclass Delete(BaseAction):\n \"\"\"\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-log-group\n resource: log-group\n filters:\n - type: last-write\n days: 182.5\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('logs:DeleteLogGroup',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n for r in resources:\n client.delete_log_group(logGroupName=r['logGroupName'])\n\n\[email protected]_registry.register('last-write')\nclass LastWriteDays(Filter):\n \"\"\"Filters CloudWatch log groups by last write\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-stale-groups\n resource: log-group\n filters:\n - type: last-write\n days: 60\n \"\"\"\n\n schema = type_schema(\n 'last-write', days={'type': 'number'})\n permissions = ('logs:DescribeLogStreams',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n self.date_threshold = datetime.utcnow() - timedelta(\n days=self.data['days'])\n return [r for r in resources if self.check_group(client, r)]\n\n def check_group(self, client, group):\n streams = client.describe_log_streams(\n logGroupName=group['logGroupName'],\n orderBy='LastEventTime',\n descending=True,\n limit=3).get('logStreams')\n group['streams'] = streams\n if not streams:\n last_timestamp = group['creationTime']\n elif streams[0]['storedBytes'] == 0:\n last_timestamp = streams[0]['creationTime']\n else:\n last_timestamp = streams[0]['lastIngestionTime']\n\n last_write = datetime.fromtimestamp(last_timestamp / 1000.0)\n group['lastWrite'] = last_write\n return self.date_threshold > last_write\n\n\[email protected]_registry.register('cross-account')\nclass LogCrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n permissions = ('logs:DescribeSubscriptionFilters',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n accounts = self.get_accounts()\n results = []\n with self.executor_factory(max_workers=1) as w:\n futures = []\n for rset in chunks(resources, 50):\n futures.append(\n w.submit(\n self.process_resource_set, client, accounts, rset))\n for f in as_completed(futures):\n if f.exception():\n self.log.error(\n \"Error checking log groups cross-account %s\",\n f.exception())\n continue\n results.extend(f.result())\n return results\n\n def process_resource_set(self, client, accounts, resources):\n results = []\n for r in resources:\n found = False\n filters = self.manager.retry(\n client.describe_subscription_filters,\n logGroupName=r['logGroupName']).get('subscriptionFilters', ())\n for f in filters:\n if 'destinationArn' not in f:\n continue\n account_id = f['destinationArn'].split(':', 5)[4]\n if account_id not in 
accounts:\n r.setdefault('c7n:CrossAccountViolations', []).append(\n account_id)\n found = True\n if found:\n results.append(r)\n return results\n\n\[email protected]_registry.register('set-encryption')\nclass EncryptLogGroup(BaseAction):\n \"\"\"Encrypt/Decrypt a log group\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: encrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: absent\n actions:\n - type: set-encryption\n kms-key: alias/mylogkey\n state: True\n\n - name: decrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: kms:key:arn\n actions:\n - type: set-encryption\n state: False\n \"\"\"\n schema = type_schema(\n 'set-encryption',\n **{'kms-key': {'type': 'string'},\n 'state': {'type': 'boolean'}})\n permissions = (\n 'logs:AssociateKmsKey', 'logs:DisassociateKmsKey', 'kms:DescribeKey')\n\n def validate(self):\n if not self.data.get('state', True):\n return self\n key = self.data.get('kms-key', '')\n if not key:\n raise ValueError('Must specify either a KMS key ARN or Alias')\n if 'alias/' not in key and ':key/' not in key:\n raise PolicyValidationError(\n \"Invalid kms key format %s\" % key)\n return self\n\n def resolve_key(self, key):\n if not key:\n return\n\n # Qualified arn for key\n if key.startswith('arn:') and ':key/' in key:\n return key\n\n # Alias\n key = local_session(\n self.manager.session_factory).client(\n 'kms').describe_key(\n KeyId=key)['KeyMetadata']['Arn']\n return key\n\n def process(self, resources):\n session = local_session(self.manager.session_factory)\n client = session.client('logs')\n\n state = self.data.get('state', True)\n key = self.resolve_key(self.data.get('kms-key'))\n\n for r in resources:\n try:\n if state:\n client.associate_kms_key(\n logGroupName=r['logGroupName'], kmsKeyId=key)\n else:\n client.disassociate_kms_key(logGroupName=r['logGroupName'])\n except client.exceptions.ResourceNotFoundException:\n continue\n", "path": "c7n/resources/cw.py" } ]
[ { "content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom concurrent.futures import as_completed\nfrom datetime import datetime, timedelta\n\nfrom c7n.actions import BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import Filter, MetricsFilter\nfrom c7n.filters.iamaccess import CrossAccountAccessFilter\nfrom c7n.query import QueryResourceManager, ChildResourceManager\nfrom c7n.manager import resources\nfrom c7n.resolver import ValuesFrom\nfrom c7n.tags import universal_augment, register_universal_tags\nfrom c7n.utils import type_schema, local_session, chunks, get_retry\n\n\[email protected]('alarm')\nclass Alarm(QueryResourceManager):\n\n class resource_type(object):\n service = 'cloudwatch'\n type = 'alarm'\n enum_spec = ('describe_alarms', 'MetricAlarms', None)\n id = 'AlarmArn'\n filter_name = 'AlarmNames'\n filter_type = 'list'\n name = 'AlarmName'\n date = 'AlarmConfigurationUpdatedTimestamp'\n dimension = None\n config_type = 'AWS::CloudWatch::Alarm'\n\n retry = staticmethod(get_retry(('Throttled',)))\n\n\[email protected]_registry.register('delete')\nclass AlarmDelete(BaseAction):\n \"\"\"Delete a cloudwatch alarm.\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-alarms\n resource: alarm\n filters:\n - type: value\n value_type: age\n key: StateUpdatedTimestamp\n value: 30\n op: ge\n - StateValue: INSUFFICIENT_DATA\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudwatch:DeleteAlarms',)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('cloudwatch')\n\n for resource_set in chunks(resources, size=100):\n self.manager.retry(\n client.delete_alarms,\n AlarmNames=[r['AlarmName'] for r in resource_set])\n\n\[email protected]('event-rule')\nclass EventRule(QueryResourceManager):\n\n class resource_type(object):\n service = 'events'\n type = 'event-rule'\n enum_spec = ('list_rules', 'Rules', None)\n name = \"Name\"\n id = \"Name\"\n filter_name = \"NamePrefix\"\n filter_type = \"scalar\"\n dimension = None\n\n\[email protected]_registry.register('metrics')\nclass EventRuleMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n return [{'Name': 'RuleName', 'Value': resource['Name']}]\n\n\[email protected]('event-rule-target')\nclass EventRuleTarget(ChildResourceManager):\n\n class resource_type(object):\n service = 'events'\n type = 'event-rule-target'\n enum_spec = ('list_targets_by_rule', 'Targets', None)\n parent_spec = ('event-rule', 'Rule', True)\n name = id = 'Id'\n dimension = None\n filter_type = filter_name = None\n\n\[email protected]_registry.register('cross-account')\nclass CrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n # dummy permission\n permissions = ('events:ListTargetsByRule',)\n\n def __call__(self, r):\n account_id = r['Arn'].split(':', 5)[4]\n return account_id not in self.accounts\n\n\[email protected]_registry.register('delete')\nclass DeleteTarget(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('events:RemoveTargets',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('events')\n rule_targets = {}\n for r in resources:\n rule_targets.setdefault(r['c7n:parent-id'], []).append(r['Id'])\n\n for rule_id, target_ids in rule_targets.items():\n client.remove_targets(\n Ids=target_ids,\n Rule=rule_id)\n\n\[email protected]('log-group')\nclass LogGroup(QueryResourceManager):\n\n class resource_type(object):\n service = 'logs'\n type = 'log-group'\n enum_spec = ('describe_log_groups', 'logGroups', None)\n name = 'logGroupName'\n id = 'arn'\n filter_name = 'logGroupNamePrefix'\n filter_type = 'scalar'\n dimension = 'LogGroupName'\n date = 'creationTime'\n\n def augment(self, resources):\n resources = universal_augment(self, resources)\n for r in resources:\n r['creationTime'] = r['creationTime'] / 1000.0\n return resources\n\n def get_arns(self, resources):\n # log group arn in resource describe has ':*' suffix, not all\n # apis can use that form, so normalize to standard arn.\n return [r['arn'][:-2] for r in resources]\n\n\nregister_universal_tags(LogGroup.filter_registry, LogGroup.action_registry)\n\n\[email protected]_registry.register('retention')\nclass Retention(BaseAction):\n \"\"\"Action to set the retention period (in days) for CloudWatch log groups\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudwatch-set-log-group-retention\n resource: log-group\n actions:\n - type: retention\n days: 200\n \"\"\"\n\n schema = type_schema('retention', days={'type': 'integer'})\n permissions = ('logs:PutRetentionPolicy',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n days = self.data['days']\n for r in resources:\n client.put_retention_policy(\n logGroupName=r['logGroupName'],\n retentionInDays=days)\n\n\[email protected]_registry.register('delete')\nclass Delete(BaseAction):\n \"\"\"\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-log-group\n resource: log-group\n filters:\n - type: last-write\n days: 182.5\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('logs:DeleteLogGroup',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n for r in resources:\n client.delete_log_group(logGroupName=r['logGroupName'])\n\n\[email protected]_registry.register('last-write')\nclass LastWriteDays(Filter):\n \"\"\"Filters CloudWatch log groups by last write\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-stale-groups\n resource: log-group\n filters:\n - type: last-write\n days: 60\n \"\"\"\n\n schema = type_schema(\n 'last-write', days={'type': 'number'})\n permissions = ('logs:DescribeLogStreams',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n self.date_threshold = datetime.utcnow() - timedelta(\n days=self.data['days'])\n return [r for r in resources if self.check_group(client, r)]\n\n def check_group(self, client, group):\n streams = client.describe_log_streams(\n logGroupName=group['logGroupName'],\n orderBy='LastEventTime',\n descending=True,\n limit=3).get('logStreams')\n group['streams'] = streams\n if not streams:\n last_timestamp = group['creationTime']\n elif streams[0]['storedBytes'] == 0:\n last_timestamp = streams[0]['creationTime']\n else:\n last_timestamp = streams[0]['lastIngestionTime']\n\n last_write = datetime.fromtimestamp(last_timestamp / 1000.0)\n group['lastWrite'] = last_write\n return self.date_threshold > last_write\n\n\[email protected]_registry.register('cross-account')\nclass LogCrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n permissions = ('logs:DescribeSubscriptionFilters',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n accounts = self.get_accounts()\n results = []\n with self.executor_factory(max_workers=1) as w:\n futures = []\n for rset in chunks(resources, 50):\n futures.append(\n w.submit(\n self.process_resource_set, client, accounts, rset))\n for f in as_completed(futures):\n if f.exception():\n self.log.error(\n \"Error checking log groups cross-account %s\",\n f.exception())\n continue\n results.extend(f.result())\n return results\n\n def process_resource_set(self, client, accounts, resources):\n results = []\n for r in resources:\n found = False\n filters = self.manager.retry(\n client.describe_subscription_filters,\n logGroupName=r['logGroupName']).get('subscriptionFilters', ())\n for f in filters:\n if 'destinationArn' not in f:\n continue\n account_id = f['destinationArn'].split(':', 5)[4]\n if account_id not in 
accounts:\n r.setdefault('c7n:CrossAccountViolations', []).append(\n account_id)\n found = True\n if found:\n results.append(r)\n return results\n\n\[email protected]_registry.register('set-encryption')\nclass EncryptLogGroup(BaseAction):\n \"\"\"Encrypt/Decrypt a log group\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: encrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: absent\n actions:\n - type: set-encryption\n kms-key: alias/mylogkey\n state: True\n\n - name: decrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: kms:key:arn\n actions:\n - type: set-encryption\n state: False\n \"\"\"\n schema = type_schema(\n 'set-encryption',\n **{'kms-key': {'type': 'string'},\n 'state': {'type': 'boolean'}})\n permissions = (\n 'logs:AssociateKmsKey', 'logs:DisassociateKmsKey', 'kms:DescribeKey')\n\n def validate(self):\n if not self.data.get('state', True):\n return self\n key = self.data.get('kms-key', '')\n if not key:\n raise ValueError('Must specify either a KMS key ARN or Alias')\n if 'alias/' not in key and ':key/' not in key:\n raise PolicyValidationError(\n \"Invalid kms key format %s\" % key)\n return self\n\n def resolve_key(self, key):\n if not key:\n return\n\n # Qualified arn for key\n if key.startswith('arn:') and ':key/' in key:\n return key\n\n # Alias\n key = local_session(\n self.manager.session_factory).client(\n 'kms').describe_key(\n KeyId=key)['KeyMetadata']['Arn']\n return key\n\n def process(self, resources):\n session = local_session(self.manager.session_factory)\n client = session.client('logs')\n\n state = self.data.get('state', True)\n key = self.resolve_key(self.data.get('kms-key'))\n\n for r in resources:\n try:\n if state:\n client.associate_kms_key(\n logGroupName=r['logGroupName'], kmsKeyId=key)\n else:\n client.disassociate_kms_key(logGroupName=r['logGroupName'])\n except client.exceptions.ResourceNotFoundException:\n continue\n", "path": "c7n/resources/cw.py" } ]
diff --git a/c7n/resources/cw.py b/c7n/resources/cw.py index 095aa01b780..d51e87a6bc6 100644 --- a/c7n/resources/cw.py +++ b/c7n/resources/cw.py @@ -163,7 +163,11 @@ class resource_type(object): dimension = 'LogGroupName' date = 'creationTime' - augment = universal_augment + def augment(self, resources): + resources = universal_augment(self, resources) + for r in resources: + r['creationTime'] = r['creationTime'] / 1000.0 + return resources def get_arns(self, resources): # log group arn in resource describe has ':*' suffix, not all diff --git a/tests/data/placebo/test_log_group_age_normalize/logs.DescribeLogGroups_1.json b/tests/data/placebo/test_log_group_age_normalize/logs.DescribeLogGroups_1.json new file mode 100644 index 00000000000..2a387c1452c --- /dev/null +++ b/tests/data/placebo/test_log_group_age_normalize/logs.DescribeLogGroups_1.json @@ -0,0 +1,25 @@ +{ + "status_code": 200, + "data": { + "logGroups": [ + { + "logGroupName": "/aws/lambda/myIOTFunction", + "creationTime": 1548368507441, + "metricFilterCount": 0, + "arn": "arn:aws:logs:us-west-2:644160558196:log-group:/aws/lambda/myIOTFunction:*", + "storedBytes": 918177 + } + ], + "ResponseMetadata": { + "RequestId": "81e0704d-91f7-11e9-9bca-9bc2904299e3", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "81e0704d-91f7-11e9-9bca-9bc2904299e3", + "content-type": "application/x-amz-json-1.1", + "content-length": "214", + "date": "Tue, 18 Jun 2019 18:33:02 GMT" + }, + "RetryAttempts": 0 + } + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_log_group_age_normalize/tagging.GetResources_1.json b/tests/data/placebo/test_log_group_age_normalize/tagging.GetResources_1.json new file mode 100644 index 00000000000..fcea880811d --- /dev/null +++ b/tests/data/placebo/test_log_group_age_normalize/tagging.GetResources_1.json @@ -0,0 +1,18 @@ +{ + "status_code": 200, + "data": { + "PaginationToken": "", + "ResourceTagMappingList": [], + "ResponseMetadata": { + "RequestId": "82178597-91f7-11e9-a240-1bab5274610f", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "82178597-91f7-11e9-a240-1bab5274610f", + "content-type": "application/x-amz-json-1.1", + "content-length": "50", + "date": "Tue, 18 Jun 2019 18:33:03 GMT" + }, + "RetryAttempts": 0 + } + } +} \ No newline at end of file diff --git a/tests/test_cwl.py b/tests/test_cwl.py index 43cfff12488..8ae234e2f73 100644 --- a/tests/test_cwl.py +++ b/tests/test_cwl.py @@ -32,6 +32,22 @@ def test_cross_account(self): self.assertEqual(len(resources), 1) self.assertEqual(resources[0]["c7n:CrossAccountViolations"], ["1111111111111"]) + def test_age_normalize(self): + factory = self.replay_flight_data("test_log_group_age_normalize") + p = self.load_policy({ + 'name': 'log-age', + 'resource': 'aws.log-group', + 'filters': [{ + 'type': 'value', + 'value_type': 'age', + 'value': 30, + 'op': 'greater-than', + 'key': 'creationTime'}]}, + session_factory=factory, config={'region': 'us-west-2'}) + resources = p.run() + self.assertEqual(len(resources), 1) + self.assertEqual(resources[0]['creationTime'], 1548368507.441) + def test_last_write(self): factory = self.replay_flight_data("test_log_group_last_write") p = self.load_policy(
Can't filter log-group using creationTime age I'm trying, but failing, to filter log groups that were created more than 30 days ago using the following filter:

```
policies:
  - name: OldLogGroups
    resource: log-group
    filters:
      - type: value
        key: creationTime
        op: gt
        value_type: age
        value: 30
```

According to the [AWS docs](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_LogGroup.html), the `creationTime` property stores the creation time of the log group, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. I understand the 'age' value_type already supports a standard unix epoch expressed in seconds, but not milliseconds (#2051). Supporting date/time values expressed in milliseconds, or adding a new filter like `log-group-age`, would make it possible to clean up old log groups that were created a certain time ago and either have 0 stored bytes or have not had any writes for a while. custodian version: 0.8.42.1
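A minimal standalone sketch (not Cloud Custodian code) of the normalization the patch performs at augment time, using the sample record from the test fixture above; the 30-day threshold mirrors the policy in the report:

```
from datetime import datetime

# CloudWatch Logs reports creationTime in milliseconds since the epoch,
# so divide by 1000.0 before comparing it as a unix timestamp in seconds.
log_group = {'logGroupName': '/aws/lambda/myIOTFunction',
             'creationTime': 1548368507441}

created = datetime.utcfromtimestamp(log_group['creationTime'] / 1000.0)
age_days = (datetime.utcnow() - created).days
print(created.isoformat(), age_days > 30)
```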
gratipay__gratipay.com-302
[ { "content": "import datetime\nimport locale\nimport os\nfrom decimal import Decimal\n\n\ntry: # XXX This can't be right.\n locale.setlocale(locale.LC_ALL, \"en_US.utf8\")\nexcept locale.Error:\n locale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\n\n\nBIRTHDAY = datetime.date(2012, 6, 1)\nCARDINALS = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']\nMONTHS = [None, 'January', 'February', 'March', 'April', 'May', 'June', 'July',\n 'August', 'September', 'October', 'November', 'December']\n\ndef age():\n today = datetime.date.today()\n nmonths = today.month - BIRTHDAY.month\n plural = 's' if nmonths != 1 else ''\n if nmonths < 10:\n nmonths = CARDINALS[nmonths]\n else:\n nmonths = str(nmonths)\n return \"%s month%s\" % (nmonths, plural)\n\n\ndb = None # This global is wired in wireup. It's an instance of\n # gittip.postgres.PostgresManager.\n\n# Not sure we won't want this for something yet. Prune if you don't find it in\n# the codebase in a month.\nOLD_OLD_AMOUNTS= [Decimal(a) for a in ('0.00', '0.08', '0.16', '0.32', '0.64', '1.28')]\nOLD_AMOUNTS= [Decimal(a) for a in ('0.25',)]\n\nAMOUNTS= [Decimal(a) for a in ('0.00', '1.00', '3.00', '6.00', '12.00', '24.00')]\n\n\n__version__ = \"~~VERSION~~\"\n\n\ndef get_tip(tipper, tippee):\n \"\"\"Given two user ids, return a Decimal.\n \"\"\"\n TIP = \"\"\"\\\n\n SELECT amount\n FROM tips\n WHERE tipper=%s\n AND tippee=%s\n ORDER BY mtime DESC\n LIMIT 1\n\n \"\"\"\n rec = db.fetchone(TIP, (tipper, tippee))\n if rec is None:\n tip = Decimal(0.00)\n else:\n tip = rec['amount']\n return tip\n\n\ndef get_backed_amount(participant_id):\n \"\"\"Given a unicode, return a Decimal.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT sum(amount) AS backed\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.id = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n\n \"\"\"\n rec = db.fetchone(BACKED, (participant_id,))\n if rec is None:\n amount = None\n else:\n amount = rec['backed'] # might be None\n\n if amount is None:\n amount = Decimal(0.00)\n\n return amount\n\n\ndef get_number_of_backers(participant_id):\n \"\"\"Given a unicode, return an int.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT count(amount) AS nbackers\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.id = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n WHERE amount > 0\n\n \"\"\"\n rec = db.fetchone(BACKED, (participant_id,))\n if rec is None:\n nbackers = None\n else:\n nbackers = rec['nbackers'] # might be None\n\n if nbackers is None:\n nbackers = 0\n\n return nbackers\n\n\ndef get_tips_and_total(tipper, for_payday=False, db=None):\n \"\"\"Given a participant id and a date, return a list and a Decimal.\n\n This function is used to populate a participant's page for their own\n viewing pleasure, and also by the payday function. If for_payday is not\n False it must be a date object.\n\n A half-injected dependency, that's what db is.\n\n \"\"\"\n if db is None:\n from gittip import db\n\n if for_payday:\n\n # For payday we want the oldest relationship to be paid first.\n order_by = \"ctime ASC\"\n\n\n # This is where it gets crash-proof.\n # ==================================\n # We need to account for the fact that we may have crashed during\n # Payday and we're re-running that function. 
We only want to select\n # tips that existed before Payday started, but haven't been processed\n # as part of this Payday yet.\n #\n # It's a bug if the paydays subselect returns > 1 rows.\n #\n # XXX If we crash during Payday and we rerun it after a timezone\n # change, will we get burned? How?\n\n ts_filter = \"\"\"\\\n\n AND mtime < %s\n AND ( SELECT id\n FROM transfers\n WHERE tipper=t.tipper\n AND tippee=t.tippee\n AND timestamp >= %s\n ) IS NULL\n\n \"\"\"\n args = (tipper, for_payday, for_payday)\n else:\n order_by = \"amount DESC\"\n ts_filter = \"\"\n args = (tipper,)\n\n TIPS = \"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , p.claimed_time\n FROM tips t\n JOIN participants p ON p.id = t.tippee\n WHERE tipper = %%s\n %s\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY %s\n , tippee\n\n \"\"\" % (ts_filter, order_by) # XXX, No injections here, right?!\n tips = list(db.fetchall(TIPS, args))\n\n\n # Compute the total.\n # ==================\n # For payday we only want to process payments to tippees who have\n # themselves opted into Gittip. For the tipper's profile page we want to\n # show the total amount they've pledged (so they're not surprised when\n # someone *does* start accepting tips and all of a sudden they're hit with\n # bigger charges.\n\n if for_payday:\n to_total = [t for t in tips if t['claimed_time'] is not None]\n else:\n to_total = tips\n total = sum([t['amount'] for t in to_total])\n\n if not total:\n # If to_total is an empty list then total is int 0. We want a Decimal.\n total = Decimal('0.00')\n\n return tips, total\n\n\n# canonizer\n# =========\n# This is an Aspen hook to ensure that requests are served on a certain root\n# URL, even if multiple domains point to the application.\n\nclass X: pass\ncanonical_scheme = None\ncanonical_host = None\n\ndef canonize(request):\n \"\"\"Enforce a certain scheme and hostname. Store these on request as well.\n \"\"\"\n scheme = request.headers.get('X-Forwarded-Proto', 'http') # per Heroku\n host = request.headers['Host']\n bad_scheme = scheme != canonical_scheme\n bad_host = bool(canonical_host) and (host != canonical_host)\n # '' and False => ''\n if bad_scheme or bad_host:\n url = '%s://%s' % (canonical_scheme, canonical_host)\n if request.line.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n # Redirect to a particular path for idempotent methods.\n url += request.line.uri.path.raw\n if request.line.uri.querystring:\n url += '?' + request.line.uri.querystring.raw\n else:\n # For non-idempotent methods, redirect to homepage.\n url += '/'\n request.redirect(url, permanent=True)\n\n\ndef configure_payments(request):\n # Work-around for https://github.com/balanced/balanced-python/issues/5\n import balanced\n balanced.configure(os.environ['BALANCED_API_SECRET'])\n", "path": "gittip/__init__.py" } ]
[ { "content": "import datetime\nimport locale\nimport os\nfrom decimal import Decimal\n\n\ntry: # XXX This can't be right.\n locale.setlocale(locale.LC_ALL, \"en_US.utf8\")\nexcept locale.Error:\n locale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\n\n\nBIRTHDAY = datetime.date(2012, 6, 1)\nCARDINALS = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']\nMONTHS = [None, 'January', 'February', 'March', 'April', 'May', 'June', 'July',\n 'August', 'September', 'October', 'November', 'December']\n\ndef age():\n today = datetime.date.today()\n nmonths = today.month - BIRTHDAY.month\n plural = 's' if nmonths != 1 else ''\n if nmonths < 10:\n nmonths = CARDINALS[nmonths]\n else:\n nmonths = str(nmonths)\n return \"%s month%s\" % (nmonths, plural)\n\n\ndb = None # This global is wired in wireup. It's an instance of\n # gittip.postgres.PostgresManager.\n\n# Not sure we won't want this for something yet. Prune if you don't find it in\n# the codebase in a month.\nOLD_OLD_AMOUNTS= [Decimal(a) for a in ('0.00', '0.08', '0.16', '0.32', '0.64', '1.28')]\nOLD_AMOUNTS= [Decimal(a) for a in ('0.25',)]\n\nAMOUNTS= [Decimal(a) for a in ('0.00', '1.00', '3.00', '6.00', '12.00', '24.00')]\n\n\n__version__ = \"~~VERSION~~\"\n\n\ndef get_tip(tipper, tippee):\n \"\"\"Given two user ids, return a Decimal.\n \"\"\"\n TIP = \"\"\"\\\n\n SELECT amount\n FROM tips\n WHERE tipper=%s\n AND tippee=%s\n ORDER BY mtime DESC\n LIMIT 1\n\n \"\"\"\n rec = db.fetchone(TIP, (tipper, tippee))\n if rec is None:\n tip = Decimal(0.00)\n else:\n tip = rec['amount']\n return tip\n\n\ndef get_backed_amount(participant_id):\n \"\"\"Given a unicode, return a Decimal.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT sum(amount) AS backed\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.id = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n\n \"\"\"\n rec = db.fetchone(BACKED, (participant_id,))\n if rec is None:\n amount = None\n else:\n amount = rec['backed'] # might be None\n\n if amount is None:\n amount = Decimal(0.00)\n\n return amount\n\n\ndef get_number_of_backers(participant_id):\n \"\"\"Given a unicode, return an int.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT count(amount) AS nbackers\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.id = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n WHERE amount > 0\n\n \"\"\"\n rec = db.fetchone(BACKED, (participant_id,))\n if rec is None:\n nbackers = None\n else:\n nbackers = rec['nbackers'] # might be None\n\n if nbackers is None:\n nbackers = 0\n\n return nbackers\n\n\ndef get_tips_and_total(tipper, for_payday=False, db=None):\n \"\"\"Given a participant id and a date, return a list and a Decimal.\n\n This function is used to populate a participant's page for their own\n viewing pleasure, and also by the payday function. If for_payday is not\n False it must be a date object.\n\n A half-injected dependency, that's what db is.\n\n \"\"\"\n if db is None:\n from gittip import db\n\n if for_payday:\n\n # For payday we want the oldest relationship to be paid first.\n order_by = \"ctime ASC\"\n\n\n # This is where it gets crash-proof.\n # ==================================\n # We need to account for the fact that we may have crashed during\n # Payday and we're re-running that function. 
We only want to select\n # tips that existed before Payday started, but haven't been processed\n # as part of this Payday yet.\n #\n # It's a bug if the paydays subselect returns > 1 rows.\n #\n # XXX If we crash during Payday and we rerun it after a timezone\n # change, will we get burned? How?\n\n ts_filter = \"\"\"\\\n\n AND mtime < %s\n AND ( SELECT id\n FROM transfers\n WHERE tipper=t.tipper\n AND tippee=t.tippee\n AND timestamp >= %s\n ) IS NULL\n\n \"\"\"\n args = (tipper, for_payday, for_payday)\n else:\n order_by = \"amount DESC\"\n ts_filter = \"\"\n args = (tipper,)\n\n TIPS = \"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , p.claimed_time\n FROM tips t\n JOIN participants p ON p.id = t.tippee\n WHERE tipper = %%s\n %s\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY %s\n , tippee\n\n \"\"\" % (ts_filter, order_by) # XXX, No injections here, right?!\n tips = list(db.fetchall(TIPS, args))\n\n\n # Compute the total.\n # ==================\n # For payday we only want to process payments to tippees who have\n # themselves opted into Gittip. For the tipper's profile page we want to\n # show the total amount they've pledged (so they're not surprised when\n # someone *does* start accepting tips and all of a sudden they're hit with\n # bigger charges.\n\n if for_payday:\n to_total = [t for t in tips if t['claimed_time'] is not None]\n else:\n to_total = tips\n total = sum([t['amount'] for t in to_total])\n\n if not total:\n # If to_total is an empty list then total is int 0. We want a Decimal.\n total = Decimal('0.00')\n\n return tips, total\n\n\ndef get_histogram_of_giving(user):\n SQL = \"\"\"\n SELECT amount, count(amount) num_contributing FROM tips t WHERE\n tippee=%s GROUP BY (amount)\n \"\"\"\n results = dict()\n for amount_dict in db.fetchall(SQL, (user,)):\n results[amount_dict['amount']] = amount_dict['num_contributing']\n return results\n\n\n# canonizer\n# =========\n# This is an Aspen hook to ensure that requests are served on a certain root\n# URL, even if multiple domains point to the application.\n\nclass X: pass\ncanonical_scheme = None\ncanonical_host = None\n\ndef canonize(request):\n \"\"\"Enforce a certain scheme and hostname. Store these on request as well.\n \"\"\"\n scheme = request.headers.get('X-Forwarded-Proto', 'http') # per Heroku\n host = request.headers['Host']\n bad_scheme = scheme != canonical_scheme\n bad_host = bool(canonical_host) and (host != canonical_host)\n # '' and False => ''\n if bad_scheme or bad_host:\n url = '%s://%s' % (canonical_scheme, canonical_host)\n if request.line.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n # Redirect to a particular path for idempotent methods.\n url += request.line.uri.path.raw\n if request.line.uri.querystring:\n url += '?' + request.line.uri.querystring.raw\n else:\n # For non-idempotent methods, redirect to homepage.\n url += '/'\n request.redirect(url, permanent=True)\n\n\ndef configure_payments(request):\n # Work-around for https://github.com/balanced/balanced-python/issues/5\n import balanced\n balanced.configure(os.environ['BALANCED_API_SECRET'])\n", "path": "gittip/__init__.py" } ]
diff --git a/gittip/__init__.py b/gittip/__init__.py
index 856ffa0418..a176a4934c 100644
--- a/gittip/__init__.py
+++ b/gittip/__init__.py
@@ -215,6 +215,17 @@ def get_tips_and_total(tipper, for_payday=False, db=None):
     return tips, total
 
 
+def get_histogram_of_giving(user):
+    SQL = """
+        SELECT amount, count(amount) num_contributing FROM tips t WHERE
+        tippee=%s GROUP BY (amount)
+    """
+    results = dict()
+    for amount_dict in db.fetchall(SQL, (user,)):
+        results[amount_dict['amount']] = amount_dict['num_contributing']
+    return results
+
+
 # canonizer
 # =========
 # This is an Aspen hook to ensure that requests are served on a certain root
diff --git a/tests/test_stats.py b/tests/test_stats.py
index aa8cb5289b..5a931bbade 100644
--- a/tests/test_stats.py
+++ b/tests/test_stats.py
@@ -1,7 +1,9 @@
 from datetime import datetime
+from decimal import Decimal
 
 from mock import patch
 
+import gittip
 from gittip.billing.payday import Payday
 from gittip import testing
 from gittip import wireup
@@ -21,6 +23,59 @@ def test_commaize_commaizes_and_obeys_decimal_places():
     assert actual == "1,000.0000", actual
 
 
+class HistogramOfGivingTests(testing.GittipBaseDBTest):
+    def setUp(self):
+        super(HistogramOfGivingTests, self).setUp()
+
+        user_ids = [x[1] for x in testing.GITHUB_USERS]
+        prices = (0, 1, 3, 6, 12, 24)
+        donation_map = {
+            'lgtest': {
+                'lglocktest': 1,
+                'gittip-test-0': 3,
+                'gittip-test-1': 6,
+                'gittip-test-2': 0,
+            },
+            'lglocktest': {
+                'lgtest': 3,
+                'gittip-test-0': 6,
+                'gittip-test-1': 6,
+                'gittip-test-2': 3,
+            },
+            'gittip-test-0': {
+                'lgtest': 12,
+            },
+            'gittip-test-1': {
+                'lgtest': 3,
+            },
+            'gittip-test-2': {
+                'lgtest': 6,
+            },
+            'gittip-test-3': {
+            },
+        }
+        for tipper, donation in donation_map.iteritems():
+            for tippee, amount in donation.iteritems():
+                self.db.execute(
+                    "INSERT INTO tips (ctime, tipper, tippee, amount) " \
+                    "VALUES (now(), %s, %s, %s);",
+                    (tipper, tippee, amount))
+
+    def test_histogram(self):
+        expected = {
+            Decimal('3.00'): 2,
+            Decimal('6.00'): 1,
+            Decimal('12.00'): 1
+        }
+        actual = gittip.get_histogram_of_giving('lgtest')
+        self.assertEqual(expected, actual)
+
+    def test_histogram_no_tips(self):
+        expected = {}
+        actual = gittip.get_histogram_of_giving('gittip-test-3')
+        self.assertEqual(expected, actual)
+
+
 # rendering
 class TestStatsPage(testing.GittipBaseTest):
diff --git a/www/%participant_id/index.html b/www/%participant_id/index.html
index 15605c5301..9f71300751 100644
--- a/www/%participant_id/index.html
+++ b/www/%participant_id/index.html
@@ -14,7 +14,8 @@
 import requests
 from aspen import json, Response
 from aspen.utils import to_age
-from gittip import AMOUNTS, db, get_tip, get_tips_and_total, get_backed_amount
+from gittip import AMOUNTS, db, get_tip, get_tips_and_total, \
+    get_backed_amount, get_histogram_of_giving
 from gittip.utils import wrap
 from gittip.networks import github, resolve_unclaimed
 
@@ -64,6 +65,9 @@
 else:
     my_tip = get_tip(user.id, participant['id'])
 
+giving_histogram = get_histogram_of_giving(user.id)
+total_givers = sum(giving_histogram.values())
+
 can_tip = True
 backed_amount = get_backed_amount(participant['id'])
 tip_or_pledge = "tip"
@@ -72,7 +76,36 @@
 # ==========================================================================
 ^L
 {% extends templates/participant.html %}
 {% block their_voice %}
+    <style>
+        /** This is blatantly copied from stats.html. We should really fix
+            that somehow. **/
+        .dollar-sign {
+            padding: 0 2pt 0 24pt;
+            text-align: right;
+        }
+        .amount {
+            padding: 0 6pt 0 0;
+            text-align: right;
+        }
+        .amount-change {
+            padding: 6pt 0 6pt 24pt;
+            text-align: left;
+        }
+        .count {
+            text-align: left;
+            white-space: nowrap;
+        }
+        .count SPAN.number {
+            font-size: 8pt;
+        }
+        .count SPAN.bar {
+            background: #B2A196;
+            display: inline-block;
+            margin-right: 3pt;
+            height: 9pt;
+        }
+
     #matrix TR.not-over BUTTON.empty {
         background: transparent;
         color: #F7F7F6;
@@ -485,6 +518,30 @@
         <h2>You receive ${{ backed_amount }} per week.
             <span class="small"><a href="history.html">History</a></span>
         </h2>
+        <table id="tip-distribution">
+            {% for amount, contributors in giving_histogram.iteritems() %}
+            <tr>
+                <td class="dollar-sign">$</td>
+                <td class="amount">{{ amount }}</td>
+                <td class="count">
+                    <span class="bar" style="width: {{ ((1.0 * contributors) / total_givers) * 300 }}pt"></span>
+                    {{((1.0 * contributors) / total_givers) * 100 }}%
+                    <span class="number">({{ contributors }})</span>
+                </td>
+            </tr>
+            {% if str(amount) == '0.25' %}
+            <td class="amount-change help" colspan="3">
+                <a href="https://github.com/whit537/www.gittip.com/issues/180"
+                   target="_blank">Read
+                here</a> about raising the minimum.
+            </td>
+            {% end %}
+            {% end %}
+        </table>
+
+
+
+
         {% if backed_amount == 0 %}
         <p>How can you convince people to tip you? <b>Make the world
Show how many total funders there are and the donation amount each is in for. As a receiver of money I'd like to know how diversified my $13/wk is, so that I know how volatile my income is likely to be. How I see this working is a histogram similar to:

    $1  [---] 1
    $3  []
    $6  []
    $12 [---] 1
    $24 []

which shows that I have 2 funders, one in for $1 and one in for $12.
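For reference, here is a minimal standalone sketch of the counting step behind such a histogram, done in plain Python rather than SQL. The names `histogram_of_giving` and `latest_tips` are hypothetical illustrations, not part of gittip's actual API; the patch itself implements the count as a `GROUP BY (amount)` query.

```python
from collections import Counter
from decimal import Decimal


def histogram_of_giving(latest_tips):
    # latest_tips: iterable of (tipper, amount) pairs for one tippee.
    # Returns {amount: number_of_tippers}, mirroring what the patch's
    # "GROUP BY (amount)" aggregation computes in SQL.
    return dict(Counter(amount for _tipper, amount in latest_tips))


# The issue's example: two funders, one in for $1 and one in for $12.
tips = [('alice', Decimal('1.00')), ('bob', Decimal('12.00'))]
hist = histogram_of_giving(tips)
total = sum(hist.values())
for amount in sorted(hist):
    n = hist[amount]
    print('$%s [%s] %d (%d%%)' % (amount, '-' * (3 * n), n, 100 * n // total))
```

The percentage per bucket (count divided by total givers) is also what the patched template renders as the bar width.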
chainer__chainer-3129
[ { "content": "import collections\nimport contextlib\nimport copy\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import initializers\nfrom chainer import variable\n\n\ndef _is_shape(value):\n if value is None:\n return True\n elif isinstance(value, collections.Sequence):\n try:\n return all(int(x) for x in value)\n except TypeError:\n return False\n try:\n return int(value)\n except TypeError:\n return False\n\n\ndef _ensure_shape_dtype(value):\n # Return value paired with dtype FP32 if it is a shape.\n if _is_shape(value):\n return value, 'f'\n # Otherwise, returns it with assuming a shape-dtype pair.\n else:\n return value\n\n\nclass Link(object):\n\n \"\"\"Building block of model definitions.\n\n Link is a building block of neural network models that support various\n features like handling parameters, defining network fragments,\n serialization, etc.\n\n Link is the primitive structure for the model definitions. It supports\n management of parameter variables and *persistent values* that should be\n incorporated to serialization.\n\n Parameter is an instance of :class:`~chainer.Parameter` registered to a\n link. A :class:`~chainer.Parameter` object can be registered as a\n parameter of the link by assigning it to an attribute within *an\n initialization scope*, which is a code surrounded by a\n :meth:`init_scope` context manager using the ``with`` statement.\n\n Persistent values are arrays, scalars, or any other serializable values\n registered via :meth:`register_persistent` or :meth:`add_persistent`.\n\n .. note::\n Whereas arbitrary serializable objects can be registered as persistent\n values, it is strongly recommended to just register values that should\n be treated as results of learning. A typical example of persistent\n values is ones computed during training and required for testing, e.g.\n running statistics for batch normalization.\n\n Parameters and persistent values are referred by their names. They can be\n accessed as attributes of the links. Link class itself manages the lists\n of names of parameters and persistent values to distinguish parameters and\n persistent values from other attributes.\n\n Link can be composed into more complex models. This composition feature is\n supported by child classes like :class:`Chain` and :class:`ChainList`. One\n can create a chain by combining one or more links. See the documents for\n these classes for details.\n\n As noted above, Link supports the serialization protocol of the\n :class:`~chainer.Serializer` class. **Note that only parameters and\n persistent values are saved and loaded.** Other attributes are considered\n as a part of user program (i.e. a part of network definition). In order to\n construct a link from saved file, other attributes must be identically\n reconstructed by user codes.\n\n .. admonition:: Example\n\n This is a simple example of custom link definition. Chainer itself also\n provides many links defined under the :mod:`~chainer.links` module. They\n might serve as examples, too.\n\n Consider we want to define a simple primitive link that implements a\n fully-connected layer based on the :func:`~functions.linear` function.\n Note that this function takes input units, a weight variable, and a bias\n variable as arguments. 
Then, the fully-connected layer can be defined as\n follows::\n\n import chainer\n import chainer.functions as F\n from chainer import initializers\n import numpy as np\n\n class LinearLayer(chainer.Link):\n\n def __init__(self, n_in, n_out):\n super(LinearLayer, self).__init__()\n with self.init_scope():\n self.W = chainer.Parameter(\n initializers.Normal(), (n_out, n_in))\n self.b = chainer.Parameter(\n initializers.Zero(), (n_out,))\n\n def __call__(self, x):\n return F.linear(x, self.W, self.b)\n\n This example shows that a user can define arbitrary parameters and use\n them in any methods. Links typically implement the ``__call__``\n operator, although they can also provide other methods to implement the\n forward propagation.\n\n Args:\n params: *(deprecated since v2.0.0)* Names, shapes, and optional dtypes\n of initial parameters. The keywords are used as the parameter\n names and the corresponding values consist either of the shape or\n a tuple of shape and a dtype ``(shape, dtype)``. If only the shape\n is supplied, the default dtype will be used.\n\n Attributes:\n ~Link.name (str): Name of this link, given by the parent chain (if\n exists).\n\n \"\"\"\n\n def __init__(self, **params):\n self._params = set()\n self._persistent = set()\n self._cpu = True\n self._device_id = None\n self._within_init_scope = False\n self.name = None\n\n for name, value in six.iteritems(params):\n # Note: deprecation warning will be raised in add_param\n shape, dtype = _ensure_shape_dtype(value)\n self.add_param(name, shape, dtype=dtype)\n\n @property\n def xp(self):\n \"\"\"Array module for this link.\n\n Depending on which of CPU/GPU this link is on, this property returns\n :mod:`numpy` or :mod:`cupy`.\n\n \"\"\"\n return numpy if self._cpu else cuda.cupy\n\n @property\n def within_init_scope(self):\n \"\"\"True if the current code is inside of an initialization scope.\n\n See :meth:`init_scope` for the details of the initialization scope.\n\n \"\"\"\n return getattr(self, '_within_init_scope', False)\n\n @contextlib.contextmanager\n def init_scope(self):\n \"\"\"Creates an initialization scope.\n\n This method returns a context manager object that enables registration\n of parameters (and links for :class:`~chainer.Chain`) by an assignment.\n A :class:`~chainer.Parameter` object can be automatically registered\n by assigning it to an attribute under this context manager.\n\n .. admonition:: Example\n\n In most cases, the parameter registration is done in the\n initializer method. Using the ``init_scope`` method, we can\n simply assign a :class:`~chainer.Parameter` object to register\n it to the link.\n\n .. code-block:: python\n\n class MyLink(chainer.Link):\n def __init__(self):\n super().__init__()\n with self.init_scope():\n self.W = chainer.Parameter(0, (10, 5))\n self.b = chainer.Parameter(0, (5,))\n\n \"\"\"\n old_flag = self.within_init_scope\n self._within_init_scope = True\n try:\n yield\n finally:\n self._within_init_scope = old_flag\n\n def __setattr__(self, name, value):\n if self.within_init_scope and isinstance(value, variable.Parameter):\n value.name = name\n if not self._cpu:\n value.to_gpu(self._device_id)\n self._params.add(name)\n self._persistent.discard(name)\n super(Link, self).__setattr__(name, value)\n\n def __delattr__(self, name):\n self._params.discard(name)\n self._persistent.discard(name)\n super(Link, self).__delattr__(name)\n\n def add_param(self, name, shape=None, dtype=numpy.float32,\n initializer=None):\n \"\"\"Registers a parameter to the link.\n\n .. 
deprecated:: v2.0.0\n\n Assign a :class:`~chainer.Parameter` object directly to an\n attribute within :meth:`an initialization scope <init_scope>`\n instead. For example, the following code\n\n .. code-block:: python\n\n link.add_param('W', shape=(5, 3))\n\n can be replaced by the following assignment.\n\n .. code-block:: python\n\n with self.init_scope():\n link.W = chainer.Parameter(None, (5, 3))\n\n The latter one is easier for IDEs to keep track of the attribute's\n type.\n\n Args:\n name (str): Name of the parameter. This name is also used as the\n attribute name.\n shape (int or tuple of ints): Shape of the parameter array. If it\n is omitted, the parameter variable is left uninitialized.\n dtype: Data type of the parameter array.\n initializer: If it is not ``None``, the data is initialized with\n the given initializer. If it is an array, the data is directly\n initialized by it. If it is callable, it is used as a weight\n initializer. Note that in these cases, ``dtype`` argument is\n ignored.\n\n \"\"\"\n warnings.warn('''\\\nParameter registeration via Link.__init__ and Link.add_param are deprecated.\nAssign a Parameter object directly to an attribute within a \\\n\"with Link.init_scope():\" block instead.\n''', DeprecationWarning)\n if name in self.__dict__:\n raise AttributeError(\n 'cannot register a new parameter %s: attribute exists'\n % name)\n if initializer is None:\n initializer = initializers.NaN(dtype)\n param = variable.Parameter(initializer, shape)\n with self.init_scope():\n setattr(self, name, param)\n\n def add_persistent(self, name, value):\n \"\"\"Registers a persistent value to the link.\n\n The registered value is saved and loaded on serialization and\n deserialization. The value is set to an attribute of the link.\n\n Args:\n name (str): Name of the persistent value. This name is also used\n for the attribute name.\n value: Value to be registered.\n\n \"\"\"\n d = self.__dict__\n if name in d:\n raise AttributeError(\n 'cannot register a new persistent value %s: attribute exists'\n % name)\n self._persistent.add(name)\n self._params.discard(name)\n d[name] = value\n\n def register_persistent(self, name):\n \"\"\"Registers an attribute of a given name as a persistent value.\n\n This is a convenient method to register an existing attribute as a\n persistent value. If ``name`` has been already registered as a\n parameter, this method removes it from the list of parameter names\n and re-registers it as a persistent value.\n\n Args:\n name (str): Name of the attribute to be registered.\n\n \"\"\"\n if not hasattr(self, name):\n raise AttributeError(\n 'cannot register non-existent attribute %s as a persistent '\n 'value' % name)\n self._persistent.add(name)\n self._params.discard(name)\n\n def copy(self):\n \"\"\"Copies the link hierarchy to new one.\n\n The whole hierarchy rooted by this link is copied. The copy is\n basically shallow, except that the parameter variables are also\n shallowly copied. 
It means that the parameter variables of copied one\n are different from ones of original link, while they share the data and\n gradient arrays.\n\n The name of the link is reset on the copy, since the copied instance\n does not belong to the original parent chain (even if exists).\n\n Returns:\n Link: Copied link object.\n\n \"\"\"\n ret = copy.copy(self)\n ret._params = set(self._params)\n ret._persistent = set(self._persistent)\n ret.name = None\n d = ret.__dict__\n for name in ret._params:\n d[name] = copy.copy(d[name])\n d[name].grad = None\n return ret\n\n def to_cpu(self):\n \"\"\"Copies parameter variables and persistent values to CPU.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to CPU, the link implementation must\n override this method to do so.\n\n Returns: self\n\n \"\"\"\n if self._cpu:\n return self\n d = self.__dict__\n for name in self._params:\n d[name].to_cpu()\n for name in self._persistent:\n value = d[name]\n if isinstance(value, cuda.ndarray):\n d[name] = value.get()\n self._cpu = True\n self._device_id = None\n return self\n\n def to_gpu(self, device=None):\n \"\"\"Copies parameter variables and persistent values to GPU.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to GPU, the link implementation must\n override this method to do so.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n Returns: self\n\n \"\"\"\n cuda.check_cuda_available()\n if not self._cpu:\n return self\n d = self.__dict__\n with cuda._get_device(device):\n for name in self._params:\n d[name].to_gpu()\n for name in self._persistent:\n value = d[name]\n if isinstance(value, numpy.ndarray):\n d[name] = cuda.to_gpu(value)\n self._device_id = cuda.cupy.cuda.get_device_id()\n self._cpu = False\n return self\n\n def params(self, include_uninit=True):\n \"\"\"Returns a generator of all parameters under the link hierarchy.\n\n Args:\n include_uninit (bool): If ``True``, it also generates uninitialized\n parameters.\n\n Returns:\n A generator object that generates all parameters.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n if include_uninit or d[name].data is not None:\n yield d[name]\n\n def namedparams(self, include_uninit=True):\n \"\"\"Returns a generator of all (path, param) pairs under the hierarchy.\n\n Args:\n include_uninit (bool): If ``True``, it also generates uninitialized\n parameters.\n\n Returns:\n A generator object that generates all (path, parameter) pairs. 
The\n paths are relative from this link.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n if include_uninit or d[name].data is not None:\n yield '/' + name, d[name]\n\n def links(self, skipself=False):\n \"\"\"Returns a generator of all links under the hierarchy.\n\n Args:\n skipself (bool): If ``True``, then the generator skips this link\n and starts with the first child link.\n\n Returns:\n A generator object that generates all links.\n\n \"\"\"\n if not skipself:\n yield self\n\n def namedlinks(self, skipself=False):\n \"\"\"Returns a generator of all (path, link) pairs under the hierarchy.\n\n Args:\n skipself (bool): If ``True``, then the generator skips this link\n and starts with the first child link.\n\n Returns:\n A generator object that generates all (path, link) pairs.\n\n \"\"\"\n if not skipself:\n yield '/', self\n\n def children(self):\n \"\"\"Returns a generator of all child links.\n\n Returns:\n A generator object that generates all child links.\n\n \"\"\"\n if 0:\n yield\n\n def copyparams(self, link):\n \"\"\"Copies all parameters from given link.\n\n This method copies data arrays of all parameters in the hierarchy. The\n copy is even done across the host and devices. Note that this method\n does not copy the gradient arrays.\n\n Args:\n link (Link): Source link object.\n\n \"\"\"\n src = link.__dict__\n dst = self.__dict__\n for name in self._params:\n dst[name].copydata(src[name])\n\n def cleargrads(self):\n \"\"\"Clears all gradient arrays.\n\n This method should be called before the backward computation at every\n iteration of the optimization.\n\n \"\"\"\n for param in self.params():\n param.cleargrad()\n\n def zerograds(self):\n \"\"\"Initializes all gradient arrays by zero.\n\n This method can be used for the same purpose of cleargrads, but less\n efficient. This method is left for backward compatibility.\n\n .. deprecated:: v1.15\n Use :meth:`cleargrads` instead.\n\n \"\"\"\n warnings.warn(\n 'Link.zerograds is deprecated. Use Link.cleargrads instead.',\n DeprecationWarning)\n for param in self.params():\n param.zerograd()\n\n def addgrads(self, link):\n \"\"\"Accumulates gradient values from given link.\n\n This method adds each gradient array of the given link to corresponding\n gradient array of this link. 
The accumulation is even done across\n host and different devices.\n\n Args:\n link (Link): Source link object.\n\n \"\"\"\n src = link.__dict__\n dst = self.__dict__\n for name in self._params:\n dst[name].addgrad(src[name])\n\n def enable_update(self):\n \"\"\"Enables update rules of all parameters under the link hierarchy.\n\n This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the\n update rule of each parameter variable to ``True``.\n\n \"\"\"\n for param in self.params():\n rule = param.update_rule\n if rule is not None:\n rule.enabled = True\n\n def disable_update(self):\n \"\"\"Disables update rules of all parameters under the link hierarchy.\n\n This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the\n update rule of each parameter variable to ``False``.\n\n \"\"\"\n for param in self.params():\n rule = param.update_rule\n if rule is not None:\n rule.enabled = False\n\n @property\n def update_enabled(self):\n \"\"\"``True`` if at least one parameter has an update rule enabled.\"\"\"\n for param in self.params():\n rule = param.update_rule\n if rule is not None and rule.enabled:\n return True\n return False\n\n def serialize(self, serializer):\n \"\"\"Serializes the link object.\n\n Args:\n serializer (~chainer.AbstractSerializer): Serializer object.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n param = d[name]\n data = serializer(name, param.data)\n if param.data is None and data is not None:\n # Initialize the parameter here\n param.initialize(data.shape)\n if isinstance(param.data, numpy.ndarray):\n numpy.copyto(param.data, data)\n else:\n param.data.set(numpy.asarray(data))\n for name in self._persistent:\n d[name] = serializer(name, d[name])\n\n\nclass Chain(Link):\n\n \"\"\"Composable link with object-like interface.\n\n Composability is one of the most important features of neural nets. Neural\n net models consist of many reusable fragments, and each model itself might\n be embedded into a larger learnable system. Chain enables us to write a\n neural net based on composition, without bothering about routine works like\n collecting parameters, serialization, copying the structure with parameters\n shared, etc.\n\n This class actually provides a way to compose one or more links into one\n structure. A chain can contain one or more *child links*. Child link is a\n link registered to the chain with its own name. The child link is stored to\n an attribute of the chain with the name. User can write a whole model or a\n fragment of neural nets as a child class of Chain.\n\n Each chain itself is also a link. Therefore, one can combine chains into\n higher-level chains. In this way, links and chains construct a *link\n hierarchy*. Link hierarchy forms a tree structure, where each node is\n identified by the path from the root. The path is represented by a string\n like a file path in UNIX, consisting of names of nodes on the path, joined\n by slashes ``/``.\n\n A child link can be added just by assigning it to an attribute of the\n chain within :meth:`an initialization scope <chainer.Link.init_scope>`.\n\n The registered child link is saved and loaded on serialization and\n deserialization, and involved in the optimization. The registered link\n is called a child. 
The child link is accessible via :meth:`children`\n generator, which returns a generator running through the children in\n registered order.\n\n On registration of a child link, its :attr:`~Link.name` attribute is also\n set (or overwritten if the link has already been registered to another\n chain).\n\n .. admonition:: Example\n\n This is a simple example of custom chain definition. Chainer itself also\n provides some chains defined under the :mod:`~chainer.links` module.\n They might serve as examples, too.\n\n Consider we want to define a multi-layer perceptron consisting of two\n hidden layers with rectifiers as activation functions. We can use the\n :class:`~chainer.links.Linear` link as a building block::\n\n import chainer\n import chainer.functions as F\n import chainer.links as L\n\n class MultiLayerPerceptron(chainer.Chain):\n\n def __init__(self, n_in, n_hidden, n_out):\n super(MultilayerPerceptron, self).__init__()\n with self.init_scope():\n self.layer1 = L.Linear(n_in, n_hidden)\n self.layer2 = L.Linear(n_hidden, n_hidden)\n self.layer3 = L.Linear(n_hidden, n_out)\n\n def __call__(self, x):\n # Forward propagation\n h1 = F.relu(self.layer1(x))\n h2 = F.relu(self.layer2(h1))\n return self.layer3(h2)\n\n Child links are registered via the assignment within a\n ``with self.init_scope():`` block. The forward propagation is often\n implemented as the ``__call__`` operator as the above example, though\n it is not mandatory.\n\n Args:\n links: Child links. The keywords are used as their names. The names are\n also set to the links.\n\n .. deprecated:: v2.0.0\n\n Assign child links directly to attributes, instead.\n\n \"\"\"\n\n def __init__(self, **links):\n super(Chain, self).__init__()\n self._children = set()\n\n for name, link in six.iteritems(links):\n self.add_link(name, link)\n\n def __getitem__(self, name):\n \"\"\"Equivalent to getattr.\"\"\"\n return getattr(self, name)\n\n def __setattr__(self, name, value):\n if self.within_init_scope and isinstance(value, Link):\n if hasattr(self, name):\n raise AttributeError(\n 'cannot register a new link %s: attribute exists' % name)\n value.name = name\n self._children.add(name)\n super(Chain, self).__setattr__(name, value)\n\n def __delattr__(self, name):\n self._children.discard(name)\n super(Chain, self).__delattr__(name)\n\n def add_link(self, name, link):\n \"\"\"Registers a child link to this chain.\n\n .. deprecated:: v2.0.0\n\n Assign the child link directly to an attribute within\n :meth:`an initialization scope <chainer.Link.init_scope>`, instead.\n For example, the following code\n\n .. code-block:: python\n\n chain.add_link('l1', L.Linear(3, 5))\n\n can be replaced by the following line.\n\n .. code-block:: python\n\n with self.init_scope():\n chain.l1 = L.Linear(3, 5)\n\n The latter one is easier for IDEs to keep track of the attribute's\n type.\n\n Args:\n name (str): Name of the child link. 
This name is also used as the\n attribute name.\n link (Link): The link object to be registered.\n\n \"\"\"\n warnings.warn('''\\\nChild link registeration via Chain.__init__ and Chain.add_link are deprecated.\nAssign a Link object directly to an attribute within a \\\n\"with link.init_scope():\" block instead.\n ''', DeprecationWarning)\n if name in self.__dict__:\n raise AttributeError(\n 'cannot register a new link %s: attribute exists' % name)\n if not isinstance(link, Link):\n raise TypeError('cannot register a non-link object as a child')\n with self.init_scope():\n setattr(self, name, link)\n\n def copy(self):\n ret = super(Chain, self).copy()\n ret._children = set(ret._children)\n d = ret.__dict__\n for name in ret._children:\n # copy child links recursively\n copied = d[name].copy()\n copied.name = name\n d[name] = copied\n return ret\n\n def to_cpu(self):\n super(Chain, self).to_cpu()\n d = self.__dict__\n for name in self._children:\n d[name].to_cpu()\n return self\n\n def to_gpu(self, device=None):\n with cuda._get_device(device):\n super(Chain, self).to_gpu()\n d = self.__dict__\n for name in self._children:\n d[name].to_gpu()\n return self\n\n def params(self, include_uninit=True):\n for param in super(Chain, self).params(include_uninit):\n yield param\n d = self.__dict__\n for name in self._children:\n for param in d[name].params(include_uninit):\n yield param\n\n def namedparams(self, include_uninit=True):\n for ret in super(Chain, self).namedparams(include_uninit):\n yield ret\n d = self.__dict__\n for name in self._children:\n prefix = '/' + name\n for path, param in d[name].namedparams(include_uninit):\n yield prefix + path, param\n\n def links(self, skipself=False):\n if not skipself:\n yield self\n d = self.__dict__\n for name in self._children:\n for link in d[name].links():\n yield link\n\n def namedlinks(self, skipself=False):\n if not skipself:\n yield '/', self\n d = self.__dict__\n for name in self._children:\n child = d[name]\n prefix = '/' + name\n yield prefix, child\n for path, link in d[name].namedlinks(True):\n yield prefix + path, link\n\n def children(self):\n d = self.__dict__\n for name in self._children:\n yield d[name]\n\n def copyparams(self, link):\n super(Chain, self).copyparams(link)\n src = link.__dict__\n dst = self.__dict__\n for name in self._children:\n dst[name].copyparams(src[name])\n\n def addgrads(self, link):\n super(Chain, self).addgrads(link)\n src = link.__dict__\n dst = self.__dict__\n for name in self._children:\n dst[name].addgrads(src[name])\n\n def serialize(self, serializer):\n super(Chain, self).serialize(serializer)\n d = self.__dict__\n for name in self._children:\n d[name].serialize(serializer[name])\n\n\nclass ChainList(Link):\n\n \"\"\"Composable link with list-like interface.\n\n This is another example of compositional link. Unlike :class:`Chain`, this\n class can be used like a list of child links. Each child link is indexed by\n a non-negative integer, and it maintains the current number of registered\n child links. The :meth:`add_link` method inserts a new link at the end of\n the list. It is useful to write a chain with arbitrary number of child\n links, e.g. 
an arbitrarily deep multi-layer perceptron.\n\n Note that this class does not implement all methods of :class:`list`.\n\n Args:\n links: Initial child links.\n\n \"\"\"\n\n def __init__(self, *links):\n super(ChainList, self).__init__()\n self._children = []\n\n for link in links:\n self.add_link(link)\n\n def __getitem__(self, index):\n \"\"\"Returns the child at given index.\n\n Args:\n index (int): Index of the child in the list.\n\n Returns:\n Link: The ``index``-th child link.\n\n \"\"\"\n return self._children[index]\n\n def __iter__(self):\n return iter(self._children)\n\n def __len__(self):\n \"\"\"Returns the number of children.\"\"\"\n return len(self._children)\n\n def append(self, link):\n \"\"\"Registers a child link and adds it to the tail of the list.\n\n This is equivalent to :meth:`add_link`. This method has been added to\n emulate the ``list`` interface.\n\n Args:\n link (Link): The link object to be regsitered.\n\n \"\"\"\n self.add_link(link)\n\n def add_link(self, link):\n \"\"\"Registers a child link and adds it to the tail of the list.\n\n Args:\n link (Link): The link object to be registered.\n\n \"\"\"\n link.name = str(len(self._children))\n self._children.append(link)\n\n def copy(self):\n ret = super(ChainList, self).copy()\n ret._children = list(ret._children) # copy\n children = ret._children\n for i, child in enumerate(children):\n child = child.copy()\n child.name = str(i)\n children[i] = child\n return ret\n\n def to_cpu(self):\n super(ChainList, self).to_cpu()\n for link in self._children:\n link.to_cpu()\n return self\n\n def to_gpu(self, device=None):\n with cuda._get_device(device):\n super(ChainList, self).to_gpu()\n for link in self._children:\n link.to_gpu()\n return self\n\n def params(self, include_uninit=True):\n for param in super(ChainList, self).params(include_uninit):\n yield param\n for link in self._children:\n for param in link.params(include_uninit):\n yield param\n\n def namedparams(self, include_uninit=True):\n for ret in super(ChainList, self).namedparams(include_uninit):\n yield ret\n for idx, link in enumerate(self._children):\n prefix = '/%d' % idx\n for path, param in link.namedparams(include_uninit):\n yield prefix + path, param\n\n def links(self, skipself=False):\n if not skipself:\n yield self\n for child in self._children:\n for link in child.links():\n yield link\n\n def namedlinks(self, skipself=False):\n if not skipself:\n yield '/', self\n for idx, child in enumerate(self._children):\n prefix = '/%d' % idx\n yield prefix, child\n for path, link in child.namedlinks(True):\n yield prefix + path, link\n\n def children(self):\n for child in self._children:\n yield child\n\n def copyparams(self, link):\n super(ChainList, self).copyparams(link)\n for idx, child in enumerate(self._children):\n child.copyparams(link[idx])\n\n def addgrads(self, link):\n super(ChainList, self).addgrads(link)\n for idx, child in enumerate(self._children):\n child.addgrads(link[idx])\n\n def serialize(self, serializer):\n super(ChainList, self).serialize(serializer)\n for idx, child in enumerate(self._children):\n child.serialize(serializer['%d' % idx])\n", "path": "chainer/link.py" } ]
[ { "content": "import collections\nimport contextlib\nimport copy\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import initializers\nfrom chainer import variable\n\n\ndef _is_shape(value):\n if value is None:\n return True\n elif isinstance(value, collections.Sequence):\n try:\n return all(int(x) for x in value)\n except TypeError:\n return False\n try:\n return int(value)\n except TypeError:\n return False\n\n\ndef _ensure_shape_dtype(value):\n # Return value paired with dtype FP32 if it is a shape.\n if _is_shape(value):\n return value, 'f'\n # Otherwise, returns it with assuming a shape-dtype pair.\n else:\n return value\n\n\nclass Link(object):\n\n \"\"\"Building block of model definitions.\n\n Link is a building block of neural network models that support various\n features like handling parameters, defining network fragments,\n serialization, etc.\n\n Link is the primitive structure for the model definitions. It supports\n management of parameter variables and *persistent values* that should be\n incorporated to serialization.\n\n Parameter is an instance of :class:`~chainer.Parameter` registered to a\n link. A :class:`~chainer.Parameter` object can be registered as a\n parameter of the link by assigning it to an attribute within *an\n initialization scope*, which is a code surrounded by a\n :meth:`init_scope` context manager using the ``with`` statement.\n\n Persistent values are arrays, scalars, or any other serializable values\n registered via :meth:`register_persistent` or :meth:`add_persistent`.\n\n .. note::\n Whereas arbitrary serializable objects can be registered as persistent\n values, it is strongly recommended to just register values that should\n be treated as results of learning. A typical example of persistent\n values is ones computed during training and required for testing, e.g.\n running statistics for batch normalization.\n\n Parameters and persistent values are referred by their names. They can be\n accessed as attributes of the links. Link class itself manages the lists\n of names of parameters and persistent values to distinguish parameters and\n persistent values from other attributes.\n\n Link can be composed into more complex models. This composition feature is\n supported by child classes like :class:`Chain` and :class:`ChainList`. One\n can create a chain by combining one or more links. See the documents for\n these classes for details.\n\n As noted above, Link supports the serialization protocol of the\n :class:`~chainer.Serializer` class. **Note that only parameters and\n persistent values are saved and loaded.** Other attributes are considered\n as a part of user program (i.e. a part of network definition). In order to\n construct a link from saved file, other attributes must be identically\n reconstructed by user codes.\n\n .. admonition:: Example\n\n This is a simple example of custom link definition. Chainer itself also\n provides many links defined under the :mod:`~chainer.links` module. They\n might serve as examples, too.\n\n Consider we want to define a simple primitive link that implements a\n fully-connected layer based on the :func:`~functions.linear` function.\n Note that this function takes input units, a weight variable, and a bias\n variable as arguments. 
Then, the fully-connected layer can be defined as\n follows::\n\n import chainer\n import chainer.functions as F\n from chainer import initializers\n import numpy as np\n\n class LinearLayer(chainer.Link):\n\n def __init__(self, n_in, n_out):\n super(LinearLayer, self).__init__()\n with self.init_scope():\n self.W = chainer.Parameter(\n initializers.Normal(), (n_out, n_in))\n self.b = chainer.Parameter(\n initializers.Zero(), (n_out,))\n\n def __call__(self, x):\n return F.linear(x, self.W, self.b)\n\n This example shows that a user can define arbitrary parameters and use\n them in any methods. Links typically implement the ``__call__``\n operator, although they can also provide other methods to implement the\n forward propagation.\n\n Args:\n params: *(deprecated since v2.0.0)* Names, shapes, and optional dtypes\n of initial parameters. The keywords are used as the parameter\n names and the corresponding values consist either of the shape or\n a tuple of shape and a dtype ``(shape, dtype)``. If only the shape\n is supplied, the default dtype will be used.\n\n Attributes:\n ~Link.name (str): Name of this link, given by the parent chain (if\n exists).\n\n \"\"\"\n\n def __init__(self, **params):\n self._params = set()\n self._persistent = set()\n self._cpu = True\n self._device_id = None\n self._within_init_scope = False\n self.name = None\n\n for name, value in six.iteritems(params):\n # Note: deprecation warning will be raised in add_param\n shape, dtype = _ensure_shape_dtype(value)\n self.add_param(name, shape, dtype=dtype)\n\n @property\n def xp(self):\n \"\"\"Array module for this link.\n\n Depending on which of CPU/GPU this link is on, this property returns\n :mod:`numpy` or :mod:`cupy`.\n\n \"\"\"\n return numpy if self._cpu else cuda.cupy\n\n @property\n def within_init_scope(self):\n \"\"\"True if the current code is inside of an initialization scope.\n\n See :meth:`init_scope` for the details of the initialization scope.\n\n \"\"\"\n return getattr(self, '_within_init_scope', False)\n\n @contextlib.contextmanager\n def init_scope(self):\n \"\"\"Creates an initialization scope.\n\n This method returns a context manager object that enables registration\n of parameters (and links for :class:`~chainer.Chain`) by an assignment.\n A :class:`~chainer.Parameter` object can be automatically registered\n by assigning it to an attribute under this context manager.\n\n .. admonition:: Example\n\n In most cases, the parameter registration is done in the\n initializer method. Using the ``init_scope`` method, we can\n simply assign a :class:`~chainer.Parameter` object to register\n it to the link.\n\n .. code-block:: python\n\n class MyLink(chainer.Link):\n def __init__(self):\n super().__init__()\n with self.init_scope():\n self.W = chainer.Parameter(0, (10, 5))\n self.b = chainer.Parameter(0, (5,))\n\n \"\"\"\n old_flag = self.within_init_scope\n self._within_init_scope = True\n try:\n yield\n finally:\n self._within_init_scope = old_flag\n\n def __setattr__(self, name, value):\n if self.within_init_scope and isinstance(value, variable.Parameter):\n value.name = name\n if not self._cpu:\n value.to_gpu(self._device_id)\n self._params.add(name)\n self._persistent.discard(name)\n super(Link, self).__setattr__(name, value)\n\n def __delattr__(self, name):\n self._params.discard(name)\n self._persistent.discard(name)\n super(Link, self).__delattr__(name)\n\n def add_param(self, name, shape=None, dtype=numpy.float32,\n initializer=None):\n \"\"\"Registers a parameter to the link.\n\n .. 
deprecated:: v2.0.0\n\n Assign a :class:`~chainer.Parameter` object directly to an\n attribute within :meth:`an initialization scope <init_scope>`\n instead. For example, the following code\n\n .. code-block:: python\n\n link.add_param('W', shape=(5, 3))\n\n can be replaced by the following assignment.\n\n .. code-block:: python\n\n with self.init_scope():\n link.W = chainer.Parameter(None, (5, 3))\n\n The latter one is easier for IDEs to keep track of the attribute's\n type.\n\n Args:\n name (str): Name of the parameter. This name is also used as the\n attribute name.\n shape (int or tuple of ints): Shape of the parameter array. If it\n is omitted, the parameter variable is left uninitialized.\n dtype: Data type of the parameter array.\n initializer: If it is not ``None``, the data is initialized with\n the given initializer. If it is an array, the data is directly\n initialized by it. If it is callable, it is used as a weight\n initializer. Note that in these cases, ``dtype`` argument is\n ignored.\n\n \"\"\"\n warnings.warn('''\\\nParameter registeration via Link.__init__ and Link.add_param are deprecated.\nAssign a Parameter object directly to an attribute within a \\\n\"with Link.init_scope():\" block instead.\n''', DeprecationWarning)\n if name in self.__dict__:\n raise AttributeError(\n 'cannot register a new parameter %s: attribute exists'\n % name)\n if initializer is None:\n initializer = initializers.NaN(dtype)\n param = variable.Parameter(initializer, shape)\n with self.init_scope():\n setattr(self, name, param)\n\n def add_persistent(self, name, value):\n \"\"\"Registers a persistent value to the link.\n\n The registered value is saved and loaded on serialization and\n deserialization. The value is set to an attribute of the link.\n\n Args:\n name (str): Name of the persistent value. This name is also used\n for the attribute name.\n value: Value to be registered.\n\n \"\"\"\n d = self.__dict__\n if name in d:\n raise AttributeError(\n 'cannot register a new persistent value %s: attribute exists'\n % name)\n self._persistent.add(name)\n self._params.discard(name)\n d[name] = value\n\n def register_persistent(self, name):\n \"\"\"Registers an attribute of a given name as a persistent value.\n\n This is a convenient method to register an existing attribute as a\n persistent value. If ``name`` has been already registered as a\n parameter, this method removes it from the list of parameter names\n and re-registers it as a persistent value.\n\n Args:\n name (str): Name of the attribute to be registered.\n\n \"\"\"\n if not hasattr(self, name):\n raise AttributeError(\n 'cannot register non-existent attribute %s as a persistent '\n 'value' % name)\n self._persistent.add(name)\n self._params.discard(name)\n\n def copy(self):\n \"\"\"Copies the link hierarchy to new one.\n\n The whole hierarchy rooted by this link is copied. The copy is\n basically shallow, except that the parameter variables are also\n shallowly copied. 
It means that the parameter variables of copied one\n are different from ones of original link, while they share the data and\n gradient arrays.\n\n The name of the link is reset on the copy, since the copied instance\n does not belong to the original parent chain (even if exists).\n\n Returns:\n Link: Copied link object.\n\n \"\"\"\n ret = copy.copy(self)\n ret._params = set(self._params)\n ret._persistent = set(self._persistent)\n ret.name = None\n d = ret.__dict__\n for name in ret._params:\n d[name] = copy.copy(d[name])\n d[name].grad = None\n return ret\n\n def to_cpu(self):\n \"\"\"Copies parameter variables and persistent values to CPU.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to CPU, the link implementation must\n override this method to do so.\n\n Returns: self\n\n \"\"\"\n if self._cpu:\n return self\n d = self.__dict__\n for name in self._params:\n d[name].to_cpu()\n for name in self._persistent:\n value = d[name]\n if isinstance(value, cuda.ndarray):\n d[name] = value.get()\n self._cpu = True\n self._device_id = None\n return self\n\n def to_gpu(self, device=None):\n \"\"\"Copies parameter variables and persistent values to GPU.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to GPU, the link implementation must\n override this method to do so.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n Returns: self\n\n \"\"\"\n cuda.check_cuda_available()\n if not self._cpu:\n return self\n d = self.__dict__\n with cuda._get_device(device):\n for name in self._params:\n d[name].to_gpu()\n for name in self._persistent:\n value = d[name]\n if isinstance(value, numpy.ndarray):\n d[name] = cuda.to_gpu(value)\n self._device_id = cuda.cupy.cuda.get_device_id()\n self._cpu = False\n return self\n\n def params(self, include_uninit=True):\n \"\"\"Returns a generator of all parameters under the link hierarchy.\n\n Args:\n include_uninit (bool): If ``True``, it also generates uninitialized\n parameters.\n\n Returns:\n A generator object that generates all parameters.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n if include_uninit or d[name].data is not None:\n yield d[name]\n\n def namedparams(self, include_uninit=True):\n \"\"\"Returns a generator of all (path, param) pairs under the hierarchy.\n\n Args:\n include_uninit (bool): If ``True``, it also generates uninitialized\n parameters.\n\n Returns:\n A generator object that generates all (path, parameter) pairs. 
The\n paths are relative from this link.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n if include_uninit or d[name].data is not None:\n yield '/' + name, d[name]\n\n def links(self, skipself=False):\n \"\"\"Returns a generator of all links under the hierarchy.\n\n Args:\n skipself (bool): If ``True``, then the generator skips this link\n and starts with the first child link.\n\n Returns:\n A generator object that generates all links.\n\n \"\"\"\n if not skipself:\n yield self\n\n def namedlinks(self, skipself=False):\n \"\"\"Returns a generator of all (path, link) pairs under the hierarchy.\n\n Args:\n skipself (bool): If ``True``, then the generator skips this link\n and starts with the first child link.\n\n Returns:\n A generator object that generates all (path, link) pairs.\n\n \"\"\"\n if not skipself:\n yield '/', self\n\n def children(self):\n \"\"\"Returns a generator of all child links.\n\n Returns:\n A generator object that generates all child links.\n\n \"\"\"\n if 0:\n yield\n\n def copyparams(self, link):\n \"\"\"Copies all parameters from given link.\n\n This method copies data arrays of all parameters in the hierarchy. The\n copy is even done across the host and devices. Note that this method\n does not copy the gradient arrays.\n\n Args:\n link (Link): Source link object.\n\n \"\"\"\n src = link.__dict__\n dst = self.__dict__\n for name in self._params:\n dst[name].copydata(src[name])\n\n def cleargrads(self):\n \"\"\"Clears all gradient arrays.\n\n This method should be called before the backward computation at every\n iteration of the optimization.\n\n \"\"\"\n for param in self.params():\n param.cleargrad()\n\n def zerograds(self):\n \"\"\"Initializes all gradient arrays by zero.\n\n This method can be used for the same purpose of cleargrads, but less\n efficient. This method is left for backward compatibility.\n\n .. deprecated:: v1.15\n Use :meth:`cleargrads` instead.\n\n \"\"\"\n warnings.warn(\n 'Link.zerograds is deprecated. Use Link.cleargrads instead.',\n DeprecationWarning)\n for param in self.params():\n param.zerograd()\n\n def addgrads(self, link):\n \"\"\"Accumulates gradient values from given link.\n\n This method adds each gradient array of the given link to corresponding\n gradient array of this link. 
The accumulation is even done across\n host and different devices.\n\n Args:\n link (Link): Source link object.\n\n \"\"\"\n src = link.__dict__\n dst = self.__dict__\n for name in self._params:\n dst[name].addgrad(src[name])\n\n def enable_update(self):\n \"\"\"Enables update rules of all parameters under the link hierarchy.\n\n This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the\n update rule of each parameter variable to ``True``.\n\n \"\"\"\n for param in self.params():\n rule = param.update_rule\n if rule is not None:\n rule.enabled = True\n\n def disable_update(self):\n \"\"\"Disables update rules of all parameters under the link hierarchy.\n\n This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the\n update rule of each parameter variable to ``False``.\n\n \"\"\"\n for param in self.params():\n rule = param.update_rule\n if rule is not None:\n rule.enabled = False\n\n @property\n def update_enabled(self):\n \"\"\"``True`` if at least one parameter has an update rule enabled.\"\"\"\n for param in self.params():\n rule = param.update_rule\n if rule is not None and rule.enabled:\n return True\n return False\n\n def serialize(self, serializer):\n \"\"\"Serializes the link object.\n\n Args:\n serializer (~chainer.AbstractSerializer): Serializer object.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n param = d[name]\n data = serializer(name, param.data)\n if param.data is None and data is not None:\n # Initialize the parameter here\n param.initialize(data.shape)\n if isinstance(param.data, numpy.ndarray):\n numpy.copyto(param.data, data)\n else:\n param.data.set(numpy.asarray(data))\n for name in self._persistent:\n d[name] = serializer(name, d[name])\n\n\nclass Chain(Link):\n\n \"\"\"Composable link with object-like interface.\n\n Composability is one of the most important features of neural nets. Neural\n net models consist of many reusable fragments, and each model itself might\n be embedded into a larger learnable system. Chain enables us to write a\n neural net based on composition, without bothering about routine works like\n collecting parameters, serialization, copying the structure with parameters\n shared, etc.\n\n This class actually provides a way to compose one or more links into one\n structure. A chain can contain one or more *child links*. Child link is a\n link registered to the chain with its own name. The child link is stored to\n an attribute of the chain with the name. User can write a whole model or a\n fragment of neural nets as a child class of Chain.\n\n Each chain itself is also a link. Therefore, one can combine chains into\n higher-level chains. In this way, links and chains construct a *link\n hierarchy*. Link hierarchy forms a tree structure, where each node is\n identified by the path from the root. The path is represented by a string\n like a file path in UNIX, consisting of names of nodes on the path, joined\n by slashes ``/``.\n\n A child link can be added just by assigning it to an attribute of the\n chain within :meth:`an initialization scope <chainer.Link.init_scope>`.\n\n The registered child link is saved and loaded on serialization and\n deserialization, and involved in the optimization. The registered link\n is called a child. 
The child link is accessible via :meth:`children`\n generator, which returns a generator running through the children in\n registered order.\n\n On registration of a child link, its :attr:`~Link.name` attribute is also\n set (or overwritten if the link has already been registered to another\n chain).\n\n .. admonition:: Example\n\n This is a simple example of custom chain definition. Chainer itself also\n provides some chains defined under the :mod:`~chainer.links` module.\n They might serve as examples, too.\n\n Consider we want to define a multi-layer perceptron consisting of two\n hidden layers with rectifiers as activation functions. We can use the\n :class:`~chainer.links.Linear` link as a building block::\n\n import chainer\n import chainer.functions as F\n import chainer.links as L\n\n class MultiLayerPerceptron(chainer.Chain):\n\n def __init__(self, n_in, n_hidden, n_out):\n super(MultilayerPerceptron, self).__init__()\n with self.init_scope():\n self.layer1 = L.Linear(n_in, n_hidden)\n self.layer2 = L.Linear(n_hidden, n_hidden)\n self.layer3 = L.Linear(n_hidden, n_out)\n\n def __call__(self, x):\n # Forward propagation\n h1 = F.relu(self.layer1(x))\n h2 = F.relu(self.layer2(h1))\n return self.layer3(h2)\n\n Child links are registered via the assignment within a\n ``with self.init_scope():`` block. The forward propagation is often\n implemented as the ``__call__`` operator as the above example, though\n it is not mandatory.\n\n Args:\n links: Child links. The keywords are used as their names. The names are\n also set to the links.\n\n .. deprecated:: v2.0.0\n\n Assign child links directly to attributes, instead.\n\n \"\"\"\n\n def __init__(self, **links):\n super(Chain, self).__init__()\n self._children = set()\n\n for name, link in six.iteritems(links):\n self.add_link(name, link)\n\n def __getitem__(self, name):\n \"\"\"Equivalent to getattr.\"\"\"\n return getattr(self, name)\n\n def __setattr__(self, name, value):\n if self.within_init_scope and isinstance(value, Link):\n if hasattr(self, name):\n raise AttributeError(\n 'cannot register a new link %s: attribute exists' % name)\n value.name = name\n self._children.add(name)\n super(Chain, self).__setattr__(name, value)\n\n def __delattr__(self, name):\n self._children.discard(name)\n super(Chain, self).__delattr__(name)\n\n def add_link(self, name, link):\n \"\"\"Registers a child link to this chain.\n\n .. deprecated:: v2.0.0\n\n Assign the child link directly to an attribute within\n :meth:`an initialization scope <chainer.Link.init_scope>`, instead.\n For example, the following code\n\n .. code-block:: python\n\n chain.add_link('l1', L.Linear(3, 5))\n\n can be replaced by the following line.\n\n .. code-block:: python\n\n with self.init_scope():\n chain.l1 = L.Linear(3, 5)\n\n The latter one is easier for IDEs to keep track of the attribute's\n type.\n\n Args:\n name (str): Name of the child link. 
This name is also used as the\n attribute name.\n link (Link): The link object to be registered.\n\n \"\"\"\n warnings.warn('''\\\nChild link registeration via Chain.__init__ and Chain.add_link are deprecated.\nAssign a Link object directly to an attribute within a \\\n\"with link.init_scope():\" block instead.\n ''', DeprecationWarning)\n if name in self.__dict__:\n raise AttributeError(\n 'cannot register a new link %s: attribute exists' % name)\n if not isinstance(link, Link):\n raise TypeError('cannot register a non-link object as a child')\n with self.init_scope():\n setattr(self, name, link)\n\n def copy(self):\n ret = super(Chain, self).copy()\n ret._children = set(ret._children)\n d = ret.__dict__\n for name in ret._children:\n # copy child links recursively\n copied = d[name].copy()\n copied.name = name\n d[name] = copied\n return ret\n\n def to_cpu(self):\n super(Chain, self).to_cpu()\n d = self.__dict__\n for name in self._children:\n d[name].to_cpu()\n return self\n\n def to_gpu(self, device=None):\n with cuda._get_device(device):\n super(Chain, self).to_gpu()\n d = self.__dict__\n for name in self._children:\n d[name].to_gpu()\n return self\n\n def params(self, include_uninit=True):\n for param in super(Chain, self).params(include_uninit):\n yield param\n d = self.__dict__\n for name in self._children:\n for param in d[name].params(include_uninit):\n yield param\n\n def namedparams(self, include_uninit=True):\n for ret in super(Chain, self).namedparams(include_uninit):\n yield ret\n d = self.__dict__\n for name in self._children:\n prefix = '/' + name\n for path, param in d[name].namedparams(include_uninit):\n yield prefix + path, param\n\n def links(self, skipself=False):\n if not skipself:\n yield self\n d = self.__dict__\n for name in self._children:\n for link in d[name].links():\n yield link\n\n def namedlinks(self, skipself=False):\n if not skipself:\n yield '/', self\n d = self.__dict__\n for name in self._children:\n child = d[name]\n prefix = '/' + name\n yield prefix, child\n for path, link in d[name].namedlinks(True):\n yield prefix + path, link\n\n def children(self):\n d = self.__dict__\n for name in self._children:\n yield d[name]\n\n def copyparams(self, link):\n super(Chain, self).copyparams(link)\n src = link.__dict__\n dst = self.__dict__\n for name in self._children:\n dst[name].copyparams(src[name])\n\n def addgrads(self, link):\n super(Chain, self).addgrads(link)\n src = link.__dict__\n dst = self.__dict__\n for name in self._children:\n dst[name].addgrads(src[name])\n\n def serialize(self, serializer):\n super(Chain, self).serialize(serializer)\n d = self.__dict__\n for name in self._children:\n d[name].serialize(serializer[name])\n\n\nclass ChainList(Link):\n\n \"\"\"Composable link with list-like interface.\n\n This is another example of compositional link. Unlike :class:`Chain`, this\n class can be used like a list of child links. Each child link is indexed by\n a non-negative integer, and it maintains the current number of registered\n child links. The :meth:`add_link` method inserts a new link at the end of\n the list. It is useful to write a chain with arbitrary number of child\n links, e.g. 
an arbitrarily deep multi-layer perceptron.\n\n Note that this class does not implement all methods of :class:`list`.\n\n Args:\n links: Initial child links.\n\n \"\"\"\n\n def __init__(self, *links):\n super(ChainList, self).__init__()\n self._children = []\n\n for link in links:\n self.add_link(link)\n\n def __setattr__(self, name, value):\n if self.within_init_scope and isinstance(value, Link):\n raise TypeError(\n 'cannot register a new link'\n ' within a \"with chainlist.init_scope():\" block.')\n super(ChainList, self).__setattr__(name, value)\n\n def __getitem__(self, index):\n \"\"\"Returns the child at given index.\n\n Args:\n index (int): Index of the child in the list.\n\n Returns:\n Link: The ``index``-th child link.\n\n \"\"\"\n return self._children[index]\n\n def __iter__(self):\n return iter(self._children)\n\n def __len__(self):\n \"\"\"Returns the number of children.\"\"\"\n return len(self._children)\n\n def append(self, link):\n \"\"\"Registers a child link and adds it to the tail of the list.\n\n This is equivalent to :meth:`add_link`. This method has been added to\n emulate the ``list`` interface.\n\n Args:\n link (Link): The link object to be regsitered.\n\n \"\"\"\n self.add_link(link)\n\n def add_link(self, link):\n \"\"\"Registers a child link and adds it to the tail of the list.\n\n Args:\n link (Link): The link object to be registered.\n\n \"\"\"\n link.name = str(len(self._children))\n self._children.append(link)\n\n def copy(self):\n ret = super(ChainList, self).copy()\n ret._children = list(ret._children) # copy\n children = ret._children\n for i, child in enumerate(children):\n child = child.copy()\n child.name = str(i)\n children[i] = child\n return ret\n\n def to_cpu(self):\n super(ChainList, self).to_cpu()\n for link in self._children:\n link.to_cpu()\n return self\n\n def to_gpu(self, device=None):\n with cuda._get_device(device):\n super(ChainList, self).to_gpu()\n for link in self._children:\n link.to_gpu()\n return self\n\n def params(self, include_uninit=True):\n for param in super(ChainList, self).params(include_uninit):\n yield param\n for link in self._children:\n for param in link.params(include_uninit):\n yield param\n\n def namedparams(self, include_uninit=True):\n for ret in super(ChainList, self).namedparams(include_uninit):\n yield ret\n for idx, link in enumerate(self._children):\n prefix = '/%d' % idx\n for path, param in link.namedparams(include_uninit):\n yield prefix + path, param\n\n def links(self, skipself=False):\n if not skipself:\n yield self\n for child in self._children:\n for link in child.links():\n yield link\n\n def namedlinks(self, skipself=False):\n if not skipself:\n yield '/', self\n for idx, child in enumerate(self._children):\n prefix = '/%d' % idx\n yield prefix, child\n for path, link in child.namedlinks(True):\n yield prefix + path, link\n\n def children(self):\n for child in self._children:\n yield child\n\n def copyparams(self, link):\n super(ChainList, self).copyparams(link)\n for idx, child in enumerate(self._children):\n child.copyparams(link[idx])\n\n def addgrads(self, link):\n super(ChainList, self).addgrads(link)\n for idx, child in enumerate(self._children):\n child.addgrads(link[idx])\n\n def serialize(self, serializer):\n super(ChainList, self).serialize(serializer)\n for idx, child in enumerate(self._children):\n child.serialize(serializer['%d' % idx])\n", "path": "chainer/link.py" } ]
diff --git a/chainer/link.py b/chainer/link.py index 57b3803bb210..bd25006b1022 100644 --- a/chainer/link.py +++ b/chainer/link.py @@ -820,6 +820,13 @@ def __init__(self, *links): for link in links: self.add_link(link) + def __setattr__(self, name, value): + if self.within_init_scope and isinstance(value, Link): + raise TypeError( + 'cannot register a new link' + ' within a "with chainlist.init_scope():" block.') + super(ChainList, self).__setattr__(name, value) + def __getitem__(self, index): """Returns the child at given index. diff --git a/tests/chainer_tests/test_link.py b/tests/chainer_tests/test_link.py index 88b2901e1efb..42ef41bff0ff 100644 --- a/tests/chainer_tests/test_link.py +++ b/tests/chainer_tests/test_link.py @@ -744,6 +744,18 @@ def test_append(self): self.assertIs(self.c2[1], self.l3) self.assertEqual(self.l3.name, '1') + def test_assign_param_in_init_scope(self): + p = chainer.Parameter() + with self.c1.init_scope(): + self.c1.p = p + self.assertIn(p, self.c1.params()) + + def test_assign_link_in_init_scope(self): + l = chainer.Link() + with self.c1.init_scope(): + with self.assertRaises(TypeError): + self.c1.l = l + def test_iter(self): links = list(self.c2) self.assertEqual(2, len(links))
ChainList doesn't warn self.init_scope()
The following code has a bug: Chainer doesn't warn about the usage of `with self.init_scope():` in a subclass of `ChainList`. Could you add a warning message for such mistakes?

* Code to reproduce
```python
import chainer
from chainer import Chain, ChainList
import chainer.functions as F
import chainer.links as L
import numpy as np

class C(ChainList):  # should be 'class C(Chain)'
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(5, 5)
            self.l2 = L.Linear(5, 5)

    def __call__(self, x):
        return self.l2(F.relu(self.l1(x)))

c = C()
print(c.l1.W.data)
opt = chainer.optimizers.SGD()
opt.setup(c)
x = np.arange(10).reshape((2, 5)).astype(np.float32)
loss = F.sum(c(x))
opt.update(lambda: loss)
print(c.l1.W.data)
```
output:
```
[[ 0.22224635  0.13709065 -0.0590423   0.31734523  0.76646286]
 [-0.09569775 -0.00810872  0.72896075 -0.50144166 -0.23909038]
 [ 0.24655567 -0.59849507  0.05945947 -0.06633393 -0.05738653]
 [-0.85418522  0.56111503 -0.4280332  -0.19436245 -0.09941436]
 [-0.06522682 -0.43800679  0.7132498   0.49363273 -0.2827867 ]]
[[ 0.22224635  0.13709065 -0.0590423   0.31734523  0.76646286]
 [-0.09569775 -0.00810872  0.72896075 -0.50144166 -0.23909038]
 [ 0.24655567 -0.59849507  0.05945947 -0.06633393 -0.05738653]
 [-0.85418522  0.56111503 -0.4280332  -0.19436245 -0.09941436]
 [-0.06522682 -0.43800679  0.7132498   0.49363273 -0.2827867 ]]
```
No update takes place because the link `l1` is not registered.
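For reference, a minimal sketch of the two working alternatives, based on the `Chain`/`ChainList` code shown in the files above (the class names `MLPChain`/`MLPList` are illustrative): subclass `Chain` when children should be attributes registered inside `init_scope()`, or pass children positionally when subclassing `ChainList`.

```python
from chainer import Chain, ChainList
import chainer.functions as F
import chainer.links as L


class MLPChain(Chain):
    """Attribute-style registration: init_scope() is the Chain idiom."""

    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(5, 5)
            self.l2 = L.Linear(5, 5)

    def __call__(self, x):
        return self.l2(F.relu(self.l1(x)))


class MLPList(ChainList):
    """Index-style registration: ChainList children go through add_link()."""

    def __init__(self):
        # ChainList.__init__(*links) registers each link via add_link(),
        # so the optimizer sees their parameters.
        super().__init__(L.Linear(5, 5), L.Linear(5, 5))

    def __call__(self, x):
        return self[1](F.relu(self[0](x)))
```

With the `__setattr__` guard from the diff applied, the original snippet fails fast with a `TypeError` instead of silently training a model whose parameters were never registered.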
fonttools__fonttools-1715
[ { "content": "from fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom fontTools.misc.fixedTools import (\n\tensureVersionIsLong as fi2ve, versionToFixed as ve2fi)\nfrom . import DefaultTable\nimport math\n\n\nhheaFormat = \"\"\"\n\t\t> # big endian\n\t\ttableVersion: L\n\t\tascent: h\n\t\tdescent: h\n\t\tlineGap: h\n\t\tadvanceWidthMax: H\n\t\tminLeftSideBearing: h\n\t\tminRightSideBearing: h\n\t\txMaxExtent: h\n\t\tcaretSlopeRise: h\n\t\tcaretSlopeRun: h\n\t\tcaretOffset: h\n\t\treserved0: h\n\t\treserved1: h\n\t\treserved2: h\n\t\treserved3: h\n\t\tmetricDataFormat: h\n\t\tnumberOfHMetrics: H\n\"\"\"\n\n\nclass table__h_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__v_h_e_a\n\n\tdependencies = ['hmtx', 'glyf', 'CFF ']\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(hheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')):\n\t\t\tself.recalc(ttFont)\n\t\tself.tableVersion = fi2ve(self.tableVersion)\n\t\treturn sstruct.pack(hheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tif 'hmtx' in ttFont:\n\t\t\thmtxTable = ttFont['hmtx']\n\t\t\tself.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())\n\n\t\tboundsWidthDict = {}\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"xMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tboundsWidthDict[name] = g.xMax - g.xMin\n\t\telif 'CFF ' in ttFont:\n\t\t\ttopDict = ttFont['CFF '].cff.topDictIndex[0]\n\t\t\tcharStrings = topDict.CharStrings\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tcs = charStrings[name]\n\t\t\t\tbounds = cs.calcBounds(charStrings)\n\t\t\t\tif bounds is not None:\n\t\t\t\t\tboundsWidthDict[name] = int(\n\t\t\t\t\t\tmath.ceil(bounds[2]) - math.floor(bounds[0]))\n\n\t\tif boundsWidthDict:\n\t\t\tminLeftSideBearing = float('inf')\n\t\t\tminRightSideBearing = float('inf')\n\t\t\txMaxExtent = -float('inf')\n\t\t\tfor name, boundsWidth in boundsWidthDict.items():\n\t\t\t\tadvanceWidth, lsb = hmtxTable[name]\n\t\t\t\trsb = advanceWidth - lsb - boundsWidth\n\t\t\t\textent = lsb + boundsWidth\n\t\t\t\tminLeftSideBearing = min(minLeftSideBearing, lsb)\n\t\t\t\tminRightSideBearing = min(minRightSideBearing, rsb)\n\t\t\t\txMaxExtent = max(xMaxExtent, extent)\n\t\t\tself.minLeftSideBearing = minLeftSideBearing\n\t\t\tself.minRightSideBearing = minRightSideBearing\n\t\t\tself.xMaxExtent = xMaxExtent\n\n\t\telse: # No glyph has outlines.\n\t\t\tself.minLeftSideBearing = 0\n\t\t\tself.minRightSideBearing = 0\n\t\t\tself.xMaxExtent = 0\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(hheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\tif name == \"tableVersion\":\n\t\t\t\tvalue = fi2ve(value)\n\t\t\t\tvalue = \"0x%08x\" % value\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif name == \"tableVersion\":\n\t\t\tsetattr(self, name, ve2fi(attrs[\"value\"]))\n\t\t\treturn\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_h_h_e_a.py" } ]
[ { "content": "from fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom fontTools.misc.fixedTools import (\n\tensureVersionIsLong as fi2ve, versionToFixed as ve2fi)\nfrom . import DefaultTable\nimport math\n\n\nhheaFormat = \"\"\"\n\t\t> # big endian\n\t\ttableVersion: L\n\t\tascent: h\n\t\tdescent: h\n\t\tlineGap: h\n\t\tadvanceWidthMax: H\n\t\tminLeftSideBearing: h\n\t\tminRightSideBearing: h\n\t\txMaxExtent: h\n\t\tcaretSlopeRise: h\n\t\tcaretSlopeRun: h\n\t\tcaretOffset: h\n\t\treserved0: h\n\t\treserved1: h\n\t\treserved2: h\n\t\treserved3: h\n\t\tmetricDataFormat: h\n\t\tnumberOfHMetrics: H\n\"\"\"\n\n\nclass table__h_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__v_h_e_a\n\n\tdependencies = ['hmtx', 'glyf', 'CFF ']\n\n\t# OpenType spec renamed these, add aliases for compatibility\n\t@property\n\tdef ascender(self): return self.ascent\n\n\[email protected]\n\tdef ascender(self,value): self.ascent = value\n\n\t@property\n\tdef descender(self): return self.descent\n\n\[email protected]\n\tdef descender(self,value): self.descent = value\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(hheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')):\n\t\t\tself.recalc(ttFont)\n\t\tself.tableVersion = fi2ve(self.tableVersion)\n\t\treturn sstruct.pack(hheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tif 'hmtx' in ttFont:\n\t\t\thmtxTable = ttFont['hmtx']\n\t\t\tself.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())\n\n\t\tboundsWidthDict = {}\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"xMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tboundsWidthDict[name] = g.xMax - g.xMin\n\t\telif 'CFF ' in ttFont:\n\t\t\ttopDict = ttFont['CFF '].cff.topDictIndex[0]\n\t\t\tcharStrings = topDict.CharStrings\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tcs = charStrings[name]\n\t\t\t\tbounds = cs.calcBounds(charStrings)\n\t\t\t\tif bounds is not None:\n\t\t\t\t\tboundsWidthDict[name] = int(\n\t\t\t\t\t\tmath.ceil(bounds[2]) - math.floor(bounds[0]))\n\n\t\tif boundsWidthDict:\n\t\t\tminLeftSideBearing = float('inf')\n\t\t\tminRightSideBearing = float('inf')\n\t\t\txMaxExtent = -float('inf')\n\t\t\tfor name, boundsWidth in boundsWidthDict.items():\n\t\t\t\tadvanceWidth, lsb = hmtxTable[name]\n\t\t\t\trsb = advanceWidth - lsb - boundsWidth\n\t\t\t\textent = lsb + boundsWidth\n\t\t\t\tminLeftSideBearing = min(minLeftSideBearing, lsb)\n\t\t\t\tminRightSideBearing = min(minRightSideBearing, rsb)\n\t\t\t\txMaxExtent = max(xMaxExtent, extent)\n\t\t\tself.minLeftSideBearing = minLeftSideBearing\n\t\t\tself.minRightSideBearing = minRightSideBearing\n\t\t\tself.xMaxExtent = xMaxExtent\n\n\t\telse: # No glyph has outlines.\n\t\t\tself.minLeftSideBearing = 0\n\t\t\tself.minRightSideBearing = 0\n\t\t\tself.xMaxExtent = 0\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(hheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\tif name == \"tableVersion\":\n\t\t\t\tvalue = fi2ve(value)\n\t\t\t\tvalue = \"0x%08x\" % value\n\t\t\twriter.simpletag(name, 
value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif name == \"tableVersion\":\n\t\t\tsetattr(self, name, ve2fi(attrs[\"value\"]))\n\t\t\treturn\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_h_h_e_a.py" } ]
diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py index 2520fb3b76..0f5ec51d9d 100644 --- a/Lib/fontTools/ttLib/tables/_h_h_e_a.py +++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py @@ -35,6 +35,19 @@ class table__h_h_e_a(DefaultTable.DefaultTable): dependencies = ['hmtx', 'glyf', 'CFF '] + # OpenType spec renamed these, add aliases for compatibility + @property + def ascender(self): return self.ascent + + @ascender.setter + def ascender(self,value): self.ascent = value + + @property + def descender(self): return self.descent + + @descender.setter + def descender(self,value): self.descent = value + def decompile(self, data, ttFont): sstruct.unpack(hheaFormat, data, self) diff --git a/Tests/ttLib/tables/_h_h_e_a_test.py b/Tests/ttLib/tables/_h_h_e_a_test.py index eb457dee2c..5c03b2073a 100644 --- a/Tests/ttLib/tables/_h_h_e_a_test.py +++ b/Tests/ttLib/tables/_h_h_e_a_test.py @@ -126,6 +126,13 @@ def test_toXML_version_as_float(self): len([r for r in captor.records if "Table version value is a float" in r.msg]) == 1) + def test_aliases(self): + hhea = self.font['hhea'] + self.assertEqual(hhea.ascent, hhea.ascender) + self.assertEqual(hhea.descent, hhea.descender) + hhea.ascender = 800 + self.assertEqual(hhea.ascent, 800) + hhea.ascent = 750 class HheaDecompileOrFromXMLTest(unittest.TestCase):
ascender and ascent
The [OpenType spec](https://docs.microsoft.com/en-gb/typography/opentype/spec/hhea) calls the first two substantive entries in the `hhea` table "`ascender`" and "`descender`", while fonttools calls them "`ascent`" and "`descent`". This was surprising! Maybe it's too late to rename them, but can we at least have an alias?
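With the property aliases from the diff in place, both spellings resolve to the same underlying struct fields; a short usage sketch (the font path is hypothetical):

```python
from fontTools.ttLib import TTFont

font = TTFont("MyFont.ttf")  # hypothetical font file
hhea = font["hhea"]

# Reading: the spec's name and fonttools' name return the same value.
assert hhea.ascender == hhea.ascent
assert hhea.descender == hhea.descent

# Writing through the alias updates the canonical attribute,
# so compile() still packs the right field.
hhea.ascender = 800
assert hhea.ascent == 800
```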
celery__celery-7553
[ { "content": "\"\"\"Functional-style utilities.\"\"\"\nimport inspect\nimport sys\nfrom collections import UserList\nfrom functools import partial\nfrom itertools import islice, tee, zip_longest\n\nfrom kombu.utils.functional import LRUCache, dictfilter, is_list, lazy, maybe_evaluate, maybe_list, memoize\nfrom vine import promise\n\n__all__ = (\n 'LRUCache', 'is_list', 'maybe_list', 'memoize', 'mlazy', 'noop',\n 'first', 'firstmethod', 'chunks', 'padlist', 'mattrgetter', 'uniq',\n 'regen', 'dictfilter', 'lazy', 'maybe_evaluate', 'head_from_fun',\n 'maybe', 'fun_accepts_kwargs',\n)\n\nFUNHEAD_TEMPLATE = \"\"\"\ndef {fun_name}({fun_args}):\n return {fun_value}\n\"\"\"\n\n\nclass DummyContext:\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc_info):\n pass\n\n\nclass mlazy(lazy):\n \"\"\"Memoized lazy evaluation.\n\n The function is only evaluated once, every subsequent access\n will return the same value.\n \"\"\"\n\n #: Set to :const:`True` after the object has been evaluated.\n evaluated = False\n _value = None\n\n def evaluate(self):\n if not self.evaluated:\n self._value = super().evaluate()\n self.evaluated = True\n return self._value\n\n\ndef noop(*args, **kwargs):\n \"\"\"No operation.\n\n Takes any arguments/keyword arguments and does nothing.\n \"\"\"\n\n\ndef pass1(arg, *args, **kwargs):\n \"\"\"Return the first positional argument.\"\"\"\n return arg\n\n\ndef evaluate_promises(it):\n for value in it:\n if isinstance(value, promise):\n value = value()\n yield value\n\n\ndef first(predicate, it):\n \"\"\"Return the first element in ``it`` that ``predicate`` accepts.\n\n If ``predicate`` is None it will return the first item that's not\n :const:`None`.\n \"\"\"\n return next(\n (v for v in evaluate_promises(it) if (\n predicate(v) if predicate is not None else v is not None)),\n None,\n )\n\n\ndef firstmethod(method, on_call=None):\n \"\"\"Multiple dispatch.\n\n Return a function that with a list of instances,\n finds the first instance that gives a value for the given method.\n\n The list can also contain lazy instances\n (:class:`~kombu.utils.functional.lazy`.)\n \"\"\"\n\n def _matcher(it, *args, **kwargs):\n for obj in it:\n try:\n meth = getattr(maybe_evaluate(obj), method)\n reply = (on_call(meth, *args, **kwargs) if on_call\n else meth(*args, **kwargs))\n except AttributeError:\n pass\n else:\n if reply is not None:\n return reply\n\n return _matcher\n\n\ndef chunks(it, n):\n \"\"\"Split an iterator into chunks with `n` elements each.\n\n Warning:\n ``it`` must be an actual iterator, if you pass this a\n concrete sequence will get you repeating elements.\n\n So ``chunks(iter(range(1000)), 10)`` is fine, but\n ``chunks(range(1000), 10)`` is not.\n\n Example:\n # n == 2\n >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2)\n >>> list(x)\n [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]]\n\n # n == 3\n >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3)\n >>> list(x)\n [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]\n \"\"\"\n for item in it:\n yield [item] + list(islice(it, n - 1))\n\n\ndef padlist(container, size, default=None):\n \"\"\"Pad list with default elements.\n\n Example:\n >>> first, last, city = padlist(['George', 'Costanza', 'NYC'], 3)\n ('George', 'Costanza', 'NYC')\n >>> first, last, city = padlist(['George', 'Costanza'], 3)\n ('George', 'Costanza', None)\n >>> first, last, city, planet = padlist(\n ... ['George', 'Costanza', 'NYC'], 4, default='Earth',\n ... 
)\n ('George', 'Costanza', 'NYC', 'Earth')\n \"\"\"\n return list(container)[:size] + [default] * (size - len(container))\n\n\ndef mattrgetter(*attrs):\n \"\"\"Get attributes, ignoring attribute errors.\n\n Like :func:`operator.itemgetter` but return :const:`None` on missing\n attributes instead of raising :exc:`AttributeError`.\n \"\"\"\n return lambda obj: {attr: getattr(obj, attr, None) for attr in attrs}\n\n\ndef uniq(it):\n \"\"\"Return all unique elements in ``it``, preserving order.\"\"\"\n seen = set()\n return (seen.add(obj) or obj for obj in it if obj not in seen)\n\n\ndef lookahead(it):\n \"\"\"Yield pairs of (current, next) items in `it`.\n\n `next` is None if `current` is the last item.\n Example:\n >>> list(lookahead(x for x in range(6)))\n [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, None)]\n \"\"\"\n a, b = tee(it)\n next(b, None)\n return zip_longest(a, b)\n\n\ndef regen(it):\n \"\"\"Convert iterator to an object that can be consumed multiple times.\n\n ``Regen`` takes any iterable, and if the object is an\n generator it will cache the evaluated list on first access,\n so that the generator can be \"consumed\" multiple times.\n \"\"\"\n if isinstance(it, (list, tuple)):\n return it\n return _regen(it)\n\n\nclass _regen(UserList, list):\n # must be subclass of list so that json can encode.\n\n def __init__(self, it):\n # pylint: disable=super-init-not-called\n # UserList creates a new list and sets .data, so we don't\n # want to call init here.\n self.__it = it\n self.__consumed = []\n self.__done = False\n\n def __reduce__(self):\n return list, (self.data,)\n\n def __length_hint__(self):\n return self.__it.__length_hint__()\n\n def __lookahead_consume(self, limit=None):\n if not self.__done and (limit is None or limit > 0):\n it = iter(self.__it)\n try:\n now = next(it)\n except StopIteration:\n return\n self.__consumed.append(now)\n # Maintain a single look-ahead to ensure we set `__done` when the\n # underlying iterator gets exhausted\n while not self.__done:\n try:\n next_ = next(it)\n self.__consumed.append(next_)\n except StopIteration:\n self.__done = True\n break\n finally:\n yield now\n now = next_\n # We can break out when `limit` is exhausted\n if limit is not None:\n limit -= 1\n if limit <= 0:\n break\n\n def __iter__(self):\n yield from self.__consumed\n yield from self.__lookahead_consume()\n\n def __getitem__(self, index):\n if index < 0:\n return self.data[index]\n # Consume elements up to the desired index prior to attempting to\n # access it from within `__consumed`\n consume_count = index - len(self.__consumed) + 1\n for _ in self.__lookahead_consume(limit=consume_count):\n pass\n return self.__consumed[index]\n\n def __bool__(self):\n if len(self.__consumed):\n return True\n\n try:\n next(iter(self))\n except StopIteration:\n return False\n else:\n return True\n\n @property\n def data(self):\n if not self.__done:\n self.__consumed.extend(self.__it)\n self.__done = True\n return self.__consumed\n\n def __repr__(self):\n return \"<{}: [{}{}]>\".format(\n self.__class__.__name__,\n \", \".join(repr(e) for e in self.__consumed),\n \"...\" if not self.__done else \"\",\n )\n\n\ndef _argsfromspec(spec, replace_defaults=True):\n if spec.defaults:\n split = len(spec.defaults)\n defaults = (list(range(len(spec.defaults))) if replace_defaults\n else spec.defaults)\n positional = spec.args[:-split]\n optional = list(zip(spec.args[-split:], defaults))\n else:\n positional, optional = spec.args, []\n\n varargs = spec.varargs\n varkw = spec.varkw\n if 
spec.kwonlydefaults:\n kwonlyargs = set(spec.kwonlyargs) - set(spec.kwonlydefaults.keys())\n if replace_defaults:\n kwonlyargs_optional = [\n (kw, i) for i, kw in enumerate(spec.kwonlydefaults.keys())\n ]\n else:\n kwonlyargs_optional = list(spec.kwonlydefaults.items())\n else:\n kwonlyargs, kwonlyargs_optional = spec.kwonlyargs, []\n\n return ', '.join(filter(None, [\n ', '.join(positional),\n ', '.join(f'{k}={v}' for k, v in optional),\n f'*{varargs}' if varargs else None,\n '*' if (kwonlyargs or kwonlyargs_optional) and not varargs else None,\n ', '.join(kwonlyargs) if kwonlyargs else None,\n ', '.join(f'{k}=\"{v}\"' for k, v in kwonlyargs_optional),\n f'**{varkw}' if varkw else None,\n ]))\n\n\ndef head_from_fun(fun, bound=False, debug=False):\n \"\"\"Generate signature function from actual function.\"\"\"\n # we could use inspect.Signature here, but that implementation\n # is very slow since it implements the argument checking\n # in pure-Python. Instead we use exec to create a new function\n # with an empty body, meaning it has the same performance as\n # as just calling a function.\n is_function = inspect.isfunction(fun)\n is_callable = hasattr(fun, '__call__')\n is_cython = fun.__class__.__name__ == 'cython_function_or_method'\n is_method = inspect.ismethod(fun)\n\n if not is_function and is_callable and not is_method and not is_cython:\n name, fun = fun.__class__.__name__, fun.__call__\n else:\n name = fun.__name__\n definition = FUNHEAD_TEMPLATE.format(\n fun_name=name,\n fun_args=_argsfromspec(inspect.getfullargspec(fun)),\n fun_value=1,\n )\n if debug: # pragma: no cover\n print(definition, file=sys.stderr)\n namespace = {'__name__': fun.__module__}\n # pylint: disable=exec-used\n # Tasks are rarely, if ever, created at runtime - exec here is fine.\n exec(definition, namespace)\n result = namespace[name]\n result._source = definition\n if bound:\n return partial(result, object())\n return result\n\n\ndef arity_greater(fun, n):\n argspec = inspect.getfullargspec(fun)\n return argspec.varargs or len(argspec.args) > n\n\n\ndef fun_takes_argument(name, fun, position=None):\n spec = inspect.getfullargspec(fun)\n return (\n spec.varkw or spec.varargs or\n (len(spec.args) >= position if position else name in spec.args)\n )\n\n\ndef fun_accepts_kwargs(fun):\n \"\"\"Return true if function accepts arbitrary keyword arguments.\"\"\"\n return any(\n p for p in inspect.signature(fun).parameters.values()\n if p.kind == p.VAR_KEYWORD\n )\n\n\ndef maybe(typ, val):\n \"\"\"Call typ on value if val is defined.\"\"\"\n return typ(val) if val is not None else val\n\n\ndef seq_concat_item(seq, item):\n \"\"\"Return copy of sequence seq with item added.\n\n Returns:\n Sequence: if seq is a tuple, the result will be a tuple,\n otherwise it depends on the implementation of ``__add__``.\n \"\"\"\n return seq + (item,) if isinstance(seq, tuple) else seq + [item]\n\n\ndef seq_concat_seq(a, b):\n \"\"\"Concatenate two sequences: ``a + b``.\n\n Returns:\n Sequence: The return value will depend on the largest sequence\n - if b is larger and is a tuple, the return value will be a tuple.\n - if a is larger and is a list, the return value will be a list,\n \"\"\"\n # find the type of the largest sequence\n prefer = type(max([a, b], key=len))\n # convert the smallest list to the type of the largest sequence.\n if not isinstance(a, prefer):\n a = prefer(a)\n if not isinstance(b, prefer):\n b = prefer(b)\n return a + b\n\n\ndef is_numeric_value(value):\n return isinstance(value, (int, float)) and not 
isinstance(value, bool)\n", "path": "celery/utils/functional.py" } ]
[ { "content": "\"\"\"Functional-style utilities.\"\"\"\nimport inspect\nimport sys\nfrom collections import UserList\nfrom functools import partial\nfrom itertools import islice, tee, zip_longest\n\nfrom kombu.utils.functional import LRUCache, dictfilter, is_list, lazy, maybe_evaluate, maybe_list, memoize\nfrom vine import promise\n\n__all__ = (\n 'LRUCache', 'is_list', 'maybe_list', 'memoize', 'mlazy', 'noop',\n 'first', 'firstmethod', 'chunks', 'padlist', 'mattrgetter', 'uniq',\n 'regen', 'dictfilter', 'lazy', 'maybe_evaluate', 'head_from_fun',\n 'maybe', 'fun_accepts_kwargs',\n)\n\nFUNHEAD_TEMPLATE = \"\"\"\ndef {fun_name}({fun_args}):\n return {fun_value}\n\"\"\"\n\n\nclass DummyContext:\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc_info):\n pass\n\n\nclass mlazy(lazy):\n \"\"\"Memoized lazy evaluation.\n\n The function is only evaluated once, every subsequent access\n will return the same value.\n \"\"\"\n\n #: Set to :const:`True` after the object has been evaluated.\n evaluated = False\n _value = None\n\n def evaluate(self):\n if not self.evaluated:\n self._value = super().evaluate()\n self.evaluated = True\n return self._value\n\n\ndef noop(*args, **kwargs):\n \"\"\"No operation.\n\n Takes any arguments/keyword arguments and does nothing.\n \"\"\"\n\n\ndef pass1(arg, *args, **kwargs):\n \"\"\"Return the first positional argument.\"\"\"\n return arg\n\n\ndef evaluate_promises(it):\n for value in it:\n if isinstance(value, promise):\n value = value()\n yield value\n\n\ndef first(predicate, it):\n \"\"\"Return the first element in ``it`` that ``predicate`` accepts.\n\n If ``predicate`` is None it will return the first item that's not\n :const:`None`.\n \"\"\"\n return next(\n (v for v in evaluate_promises(it) if (\n predicate(v) if predicate is not None else v is not None)),\n None,\n )\n\n\ndef firstmethod(method, on_call=None):\n \"\"\"Multiple dispatch.\n\n Return a function that with a list of instances,\n finds the first instance that gives a value for the given method.\n\n The list can also contain lazy instances\n (:class:`~kombu.utils.functional.lazy`.)\n \"\"\"\n\n def _matcher(it, *args, **kwargs):\n for obj in it:\n try:\n meth = getattr(maybe_evaluate(obj), method)\n reply = (on_call(meth, *args, **kwargs) if on_call\n else meth(*args, **kwargs))\n except AttributeError:\n pass\n else:\n if reply is not None:\n return reply\n\n return _matcher\n\n\ndef chunks(it, n):\n \"\"\"Split an iterator into chunks with `n` elements each.\n\n Warning:\n ``it`` must be an actual iterator, if you pass this a\n concrete sequence will get you repeating elements.\n\n So ``chunks(iter(range(1000)), 10)`` is fine, but\n ``chunks(range(1000), 10)`` is not.\n\n Example:\n # n == 2\n >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2)\n >>> list(x)\n [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]]\n\n # n == 3\n >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3)\n >>> list(x)\n [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]\n \"\"\"\n for item in it:\n yield [item] + list(islice(it, n - 1))\n\n\ndef padlist(container, size, default=None):\n \"\"\"Pad list with default elements.\n\n Example:\n >>> first, last, city = padlist(['George', 'Costanza', 'NYC'], 3)\n ('George', 'Costanza', 'NYC')\n >>> first, last, city = padlist(['George', 'Costanza'], 3)\n ('George', 'Costanza', None)\n >>> first, last, city, planet = padlist(\n ... ['George', 'Costanza', 'NYC'], 4, default='Earth',\n ... 
)\n ('George', 'Costanza', 'NYC', 'Earth')\n \"\"\"\n return list(container)[:size] + [default] * (size - len(container))\n\n\ndef mattrgetter(*attrs):\n \"\"\"Get attributes, ignoring attribute errors.\n\n Like :func:`operator.itemgetter` but return :const:`None` on missing\n attributes instead of raising :exc:`AttributeError`.\n \"\"\"\n return lambda obj: {attr: getattr(obj, attr, None) for attr in attrs}\n\n\ndef uniq(it):\n \"\"\"Return all unique elements in ``it``, preserving order.\"\"\"\n seen = set()\n return (seen.add(obj) or obj for obj in it if obj not in seen)\n\n\ndef lookahead(it):\n \"\"\"Yield pairs of (current, next) items in `it`.\n\n `next` is None if `current` is the last item.\n Example:\n >>> list(lookahead(x for x in range(6)))\n [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, None)]\n \"\"\"\n a, b = tee(it)\n next(b, None)\n return zip_longest(a, b)\n\n\ndef regen(it):\n \"\"\"Convert iterator to an object that can be consumed multiple times.\n\n ``Regen`` takes any iterable, and if the object is an\n generator it will cache the evaluated list on first access,\n so that the generator can be \"consumed\" multiple times.\n \"\"\"\n if isinstance(it, (list, tuple)):\n return it\n return _regen(it)\n\n\nclass _regen(UserList, list):\n # must be subclass of list so that json can encode.\n\n def __init__(self, it):\n # pylint: disable=super-init-not-called\n # UserList creates a new list and sets .data, so we don't\n # want to call init here.\n self.__it = it\n self.__consumed = []\n self.__done = False\n\n def __reduce__(self):\n return list, (self.data,)\n\n def __length_hint__(self):\n return self.__it.__length_hint__()\n\n def __lookahead_consume(self, limit=None):\n if not self.__done and (limit is None or limit > 0):\n it = iter(self.__it)\n try:\n now = next(it)\n except StopIteration:\n return\n self.__consumed.append(now)\n # Maintain a single look-ahead to ensure we set `__done` when the\n # underlying iterator gets exhausted\n while not self.__done:\n try:\n next_ = next(it)\n self.__consumed.append(next_)\n except StopIteration:\n self.__done = True\n break\n finally:\n yield now\n now = next_\n # We can break out when `limit` is exhausted\n if limit is not None:\n limit -= 1\n if limit <= 0:\n break\n\n def __iter__(self):\n yield from self.__consumed\n yield from self.__lookahead_consume()\n\n def __getitem__(self, index):\n if index < 0:\n return self.data[index]\n # Consume elements up to the desired index prior to attempting to\n # access it from within `__consumed`\n consume_count = index - len(self.__consumed) + 1\n for _ in self.__lookahead_consume(limit=consume_count):\n pass\n return self.__consumed[index]\n\n def __bool__(self):\n if len(self.__consumed):\n return True\n\n try:\n next(iter(self))\n except StopIteration:\n return False\n else:\n return True\n\n @property\n def data(self):\n if not self.__done:\n self.__consumed.extend(self.__it)\n self.__done = True\n return self.__consumed\n\n def __repr__(self):\n return \"<{}: [{}{}]>\".format(\n self.__class__.__name__,\n \", \".join(repr(e) for e in self.__consumed),\n \"...\" if not self.__done else \"\",\n )\n\n\ndef _argsfromspec(spec, replace_defaults=True):\n if spec.defaults:\n split = len(spec.defaults)\n defaults = (list(range(len(spec.defaults))) if replace_defaults\n else spec.defaults)\n positional = spec.args[:-split]\n optional = list(zip(spec.args[-split:], defaults))\n else:\n positional, optional = spec.args, []\n\n varargs = spec.varargs\n varkw = spec.varkw\n if 
spec.kwonlydefaults:\n kwonlyargs = set(spec.kwonlyargs) - set(spec.kwonlydefaults.keys())\n if replace_defaults:\n kwonlyargs_optional = [\n (kw, i) for i, kw in enumerate(spec.kwonlydefaults.keys())\n ]\n else:\n kwonlyargs_optional = list(spec.kwonlydefaults.items())\n else:\n kwonlyargs, kwonlyargs_optional = spec.kwonlyargs, []\n\n return ', '.join(filter(None, [\n ', '.join(positional),\n ', '.join(f'{k}={v}' for k, v in optional),\n f'*{varargs}' if varargs else None,\n '*' if (kwonlyargs or kwonlyargs_optional) and not varargs else None,\n ', '.join(kwonlyargs) if kwonlyargs else None,\n ', '.join(f'{k}=\"{v}\"' for k, v in kwonlyargs_optional),\n f'**{varkw}' if varkw else None,\n ]))\n\n\ndef head_from_fun(fun, bound=False, debug=False):\n \"\"\"Generate signature function from actual function.\"\"\"\n # we could use inspect.Signature here, but that implementation\n # is very slow since it implements the argument checking\n # in pure-Python. Instead we use exec to create a new function\n # with an empty body, meaning it has the same performance as\n # as just calling a function.\n is_function = inspect.isfunction(fun)\n is_callable = callable(fun)\n is_cython = fun.__class__.__name__ == 'cython_function_or_method'\n is_method = inspect.ismethod(fun)\n\n if not is_function and is_callable and not is_method and not is_cython:\n name, fun = fun.__class__.__name__, fun.__call__\n else:\n name = fun.__name__\n definition = FUNHEAD_TEMPLATE.format(\n fun_name=name,\n fun_args=_argsfromspec(inspect.getfullargspec(fun)),\n fun_value=1,\n )\n if debug: # pragma: no cover\n print(definition, file=sys.stderr)\n namespace = {'__name__': fun.__module__}\n # pylint: disable=exec-used\n # Tasks are rarely, if ever, created at runtime - exec here is fine.\n exec(definition, namespace)\n result = namespace[name]\n result._source = definition\n if bound:\n return partial(result, object())\n return result\n\n\ndef arity_greater(fun, n):\n argspec = inspect.getfullargspec(fun)\n return argspec.varargs or len(argspec.args) > n\n\n\ndef fun_takes_argument(name, fun, position=None):\n spec = inspect.getfullargspec(fun)\n return (\n spec.varkw or spec.varargs or\n (len(spec.args) >= position if position else name in spec.args)\n )\n\n\ndef fun_accepts_kwargs(fun):\n \"\"\"Return true if function accepts arbitrary keyword arguments.\"\"\"\n return any(\n p for p in inspect.signature(fun).parameters.values()\n if p.kind == p.VAR_KEYWORD\n )\n\n\ndef maybe(typ, val):\n \"\"\"Call typ on value if val is defined.\"\"\"\n return typ(val) if val is not None else val\n\n\ndef seq_concat_item(seq, item):\n \"\"\"Return copy of sequence seq with item added.\n\n Returns:\n Sequence: if seq is a tuple, the result will be a tuple,\n otherwise it depends on the implementation of ``__add__``.\n \"\"\"\n return seq + (item,) if isinstance(seq, tuple) else seq + [item]\n\n\ndef seq_concat_seq(a, b):\n \"\"\"Concatenate two sequences: ``a + b``.\n\n Returns:\n Sequence: The return value will depend on the largest sequence\n - if b is larger and is a tuple, the return value will be a tuple.\n - if a is larger and is a list, the return value will be a list,\n \"\"\"\n # find the type of the largest sequence\n prefer = type(max([a, b], key=len))\n # convert the smallest list to the type of the largest sequence.\n if not isinstance(a, prefer):\n a = prefer(a)\n if not isinstance(b, prefer):\n b = prefer(b)\n return a + b\n\n\ndef is_numeric_value(value):\n return isinstance(value, (int, float)) and not isinstance(value, 
bool)\n", "path": "celery/utils/functional.py" } ]
diff --git a/celery/utils/functional.py b/celery/utils/functional.py index bcc15a3c788..9402a123658 100644 --- a/celery/utils/functional.py +++ b/celery/utils/functional.py @@ -311,7 +311,7 @@ def head_from_fun(fun, bound=False, debug=False): # with an empty body, meaning it has the same performance as # as just calling a function. is_function = inspect.isfunction(fun) - is_callable = hasattr(fun, '__call__') + is_callable = callable(fun) is_cython = fun.__class__.__name__ == 'cython_function_or_method' is_method = inspect.ismethod(fun)
Use callable in utils.functional.head_from_fun
After the discussion in #3952, we should investigate whether `head_from_fun` can be improved by using the builtin `callable` instead of `hasattr(fun, '__call__')`.
https://docs.python.org/2/library/functions.html#callable
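Beyond style, the two checks can genuinely disagree: the call protocol looks `__call__` up on the *type*, so an instance attribute satisfies `hasattr` but not `callable`. A minimal sketch (the class name is illustrative):

```python
class NotActuallyCallable:
    pass


obj = NotActuallyCallable()
obj.__call__ = lambda: 42  # instance attribute; ignored by the call protocol

print(hasattr(obj, '__call__'))  # True  -- misleading
print(callable(obj))             # False -- matches reality: obj() raises
                                 # TypeError, since type(obj) has no __call__
```

`callable(fun)` therefore answers the question `head_from_fun` actually cares about: whether `fun(...)` would succeed at the call site.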
translate__pootle-4270
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport datetime\nimport logging\n\nfrom optparse import make_option\n\nfrom django.core.management.base import BaseCommand, NoArgsCommand\n\nfrom pootle.runner import set_sync_mode\nfrom pootle_project.models import Project\nfrom pootle_translationproject.models import TranslationProject\n\n\nclass PootleCommand(NoArgsCommand):\n \"\"\"Base class for handling recursive pootle store management commands.\"\"\"\n\n shared_option_list = (\n make_option(\n '--project',\n action='append',\n dest='projects',\n help='Project to refresh',\n ),\n make_option(\n '--language',\n action='append',\n dest='languages',\n help='Language to refresh',\n ),\n make_option(\n \"--noinput\",\n action=\"store_true\",\n default=False,\n help=u\"Never prompt for input\",\n ),\n make_option(\n \"--no-rq\",\n action=\"store_true\",\n default=False,\n help=(u\"Run all jobs in a single process, without \"\n \"using rq workers\"),\n ),\n )\n option_list = NoArgsCommand.option_list + shared_option_list\n process_disabled_projects = False\n\n def __init__(self, *args, **kwargs):\n self.languages = []\n self.projects = []\n super(PootleCommand, self).__init__(*args, **kwargs)\n\n def do_translation_project(self, tp, **options):\n process_stores = True\n\n if hasattr(self, \"handle_translation_project\"):\n logging.info(u\"Running %s over %s\", self.name, tp)\n try:\n process_stores = self.handle_translation_project(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s\", self.name, tp)\n return\n\n if not process_stores:\n return\n\n if hasattr(self, \"handle_all_stores\"):\n logging.info(u\"Running %s over %s's files\", self.name, tp)\n try:\n self.handle_all_stores(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s's files\",\n self.name, tp)\n return\n elif hasattr(self, \"handle_store\"):\n store_query = tp.stores.live()\n for store in store_query.iterator():\n logging.info(u\"Running %s over %s\",\n self.name, store.pootle_path)\n try:\n self.handle_store(store, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s\",\n self.name, store.pootle_path)\n\n def handle_noargs(self, **options):\n # adjust debug level to the verbosity option\n verbosity = int(options.get('verbosity', 1))\n debug_levels = {\n 0: logging.ERROR,\n 1: logging.WARNING,\n 2: logging.INFO,\n 3: logging.DEBUG\n }\n debug_level = debug_levels.get(verbosity, logging.DEBUG)\n logging.getLogger().setLevel(debug_level)\n\n # reduce size of parse pool early on\n self.name = self.__class__.__module__.split('.')[-1]\n from pootle_store.fields import TranslationStoreFieldFile\n TranslationStoreFieldFile._store_cache.maxsize = 2\n TranslationStoreFieldFile._store_cache.cullsize = 2\n TranslationProject._non_db_state_cache.maxsize = 2\n TranslationProject._non_db_state_cache.cullsize = 2\n\n self.projects = options.pop('projects', [])\n self.languages = options.pop('languages', [])\n\n # info start\n start = datetime.datetime.now()\n logging.info('Start running of %s', self.name)\n\n self.handle_all(**options)\n\n # info finish\n end = datetime.datetime.now()\n logging.info('All done for %s in %s', self.name, end - start)\n\n def handle_all(self, 
**options):\n if options.get(\"no_rq\", False):\n set_sync_mode(options.get('noinput', False))\n\n if self.process_disabled_projects:\n project_query = Project.objects.all()\n else:\n project_query = Project.objects.enabled()\n\n if self.projects:\n project_query = project_query.filter(code__in=self.projects)\n\n for project in project_query.iterator():\n tp_query = project.translationproject_set \\\n .order_by('language__code')\n\n if self.languages:\n tp_query = tp_query.filter(language__code__in=self.languages)\n\n for tp in tp_query.iterator():\n self.do_translation_project(tp, **options)\n\n\nclass BaseRunCommand(BaseCommand):\n \"\"\"Base class to build new server runners.\n\n Based on code from `django-shoes\n <https://bitbucket.org/mlzboy/django-shoes/>`_.\n \"\"\"\n\n hostport_option_list = (\n make_option(\n '--host',\n action='store',\n dest='host',\n default='127.0.0.1',\n help='Hostname to listen on.',\n ),\n make_option(\n '--port',\n action='store',\n dest='port',\n default=8000,\n type=int,\n help='The TCP port to listen on.',\n ),\n )\n\n option_list = BaseCommand.option_list + hostport_option_list\n\n def handle(self, *args, **options):\n return self.serve_forever(*args, **options)\n\n def get_app(self):\n from django.contrib.staticfiles.handlers import StaticFilesHandler\n from django.core.handlers.wsgi import WSGIHandler\n\n app = StaticFilesHandler(WSGIHandler())\n return app\n\n def serve_forever(self, *args, **kwargs):\n raise NotImplementedError\n", "path": "pootle/apps/pootle_app/management/commands/__init__.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport datetime\nimport logging\n\nfrom optparse import make_option\n\nfrom django.core.management.base import BaseCommand, NoArgsCommand\n\nfrom pootle.runner import set_sync_mode\nfrom pootle_project.models import Project\nfrom pootle_translationproject.models import TranslationProject\n\n\nclass PootleCommand(NoArgsCommand):\n \"\"\"Base class for handling recursive pootle store management commands.\"\"\"\n\n shared_option_list = (\n make_option(\n '--project',\n action='append',\n dest='projects',\n help='Project to refresh',\n ),\n make_option(\n '--language',\n action='append',\n dest='languages',\n help='Language to refresh',\n ),\n make_option(\n \"--noinput\",\n action=\"store_true\",\n default=False,\n help=u\"Never prompt for input\",\n ),\n make_option(\n \"--no-rq\",\n action=\"store_true\",\n default=False,\n help=(u\"Run all jobs in a single process, without \"\n \"using rq workers\"),\n ),\n )\n option_list = NoArgsCommand.option_list + shared_option_list\n process_disabled_projects = False\n\n def __init__(self, *args, **kwargs):\n self.languages = []\n self.projects = []\n super(PootleCommand, self).__init__(*args, **kwargs)\n\n def do_translation_project(self, tp, **options):\n process_stores = True\n\n if hasattr(self, \"handle_translation_project\"):\n logging.info(u\"Running %s over %s\", self.name, tp)\n try:\n process_stores = self.handle_translation_project(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s\", self.name, tp)\n return\n\n if not process_stores:\n return\n\n if hasattr(self, \"handle_all_stores\"):\n logging.info(u\"Running %s over %s's files\", self.name, tp)\n try:\n self.handle_all_stores(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s's files\",\n self.name, tp)\n return\n elif hasattr(self, \"handle_store\"):\n store_query = tp.stores.live()\n for store in store_query.iterator():\n logging.info(u\"Running %s over %s\",\n self.name, store.pootle_path)\n try:\n self.handle_store(store, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s\",\n self.name, store.pootle_path)\n\n def handle_noargs(self, **options):\n # adjust debug level to the verbosity option\n verbosity = int(options.get('verbosity', 1))\n debug_levels = {\n 0: logging.ERROR,\n 1: logging.WARNING,\n 2: logging.INFO,\n 3: logging.DEBUG\n }\n debug_level = debug_levels.get(verbosity, logging.DEBUG)\n logging.getLogger().setLevel(debug_level)\n\n # reduce size of parse pool early on\n self.name = self.__class__.__module__.split('.')[-1]\n from pootle_store.fields import TranslationStoreFieldFile\n TranslationStoreFieldFile._store_cache.maxsize = 2\n TranslationStoreFieldFile._store_cache.cullsize = 2\n TranslationProject._non_db_state_cache.maxsize = 2\n TranslationProject._non_db_state_cache.cullsize = 2\n\n self.projects = options.pop('projects', [])\n self.languages = options.pop('languages', [])\n\n # info start\n start = datetime.datetime.now()\n logging.info('Start running of %s', self.name)\n\n self.handle_all(**options)\n\n # info finish\n end = datetime.datetime.now()\n logging.info('All done for %s in %s', self.name, end - start)\n\n def handle_all(self, 
**options):\n if options.get(\"no_rq\", False):\n set_sync_mode(options.get('noinput', False))\n\n if self.process_disabled_projects:\n project_query = Project.objects.all()\n else:\n project_query = Project.objects.enabled()\n\n if self.projects:\n project_query = project_query.filter(code__in=self.projects)\n\n for project in project_query.iterator():\n tp_query = project.translationproject_set.live() \\\n .order_by('language__code')\n\n if self.languages:\n tp_query = tp_query.filter(language__code__in=self.languages)\n\n for tp in tp_query.iterator():\n self.do_translation_project(tp, **options)\n\n\nclass BaseRunCommand(BaseCommand):\n \"\"\"Base class to build new server runners.\n\n Based on code from `django-shoes\n <https://bitbucket.org/mlzboy/django-shoes/>`_.\n \"\"\"\n\n hostport_option_list = (\n make_option(\n '--host',\n action='store',\n dest='host',\n default='127.0.0.1',\n help='Hostname to listen on.',\n ),\n make_option(\n '--port',\n action='store',\n dest='port',\n default=8000,\n type=int,\n help='The TCP port to listen on.',\n ),\n )\n\n option_list = BaseCommand.option_list + hostport_option_list\n\n def handle(self, *args, **options):\n return self.serve_forever(*args, **options)\n\n def get_app(self):\n from django.contrib.staticfiles.handlers import StaticFilesHandler\n from django.core.handlers.wsgi import WSGIHandler\n\n app = StaticFilesHandler(WSGIHandler())\n return app\n\n def serve_forever(self, *args, **kwargs):\n raise NotImplementedError\n", "path": "pootle/apps/pootle_app/management/commands/__init__.py" } ]
diff --git a/pootle/apps/pootle_app/management/commands/__init__.py b/pootle/apps/pootle_app/management/commands/__init__.py index 6ff99e222bb..eaba4eaf6e7 100644 --- a/pootle/apps/pootle_app/management/commands/__init__.py +++ b/pootle/apps/pootle_app/management/commands/__init__.py @@ -136,7 +136,7 @@ def handle_all(self, **options): project_query = project_query.filter(code__in=self.projects) for project in project_query.iterator(): - tp_query = project.translationproject_set \ + tp_query = project.translationproject_set.live() \ .order_by('language__code') if self.languages:
PootleCommand handles obsolete translation projects
The `update_stores` command can mark a TP directory obsolete many times. It doesn't affect us much, except that we get extra unnecessary log messages like this:

```
set(['get_last_updated', 'get_checks', 'get_mtime', 'get_suggestion_count', 'get_last_action', 'get_wordcount_stats']) deleted from /uk/android_announcements_evernote/ cache
```

which tells us that the cache for the TP has been deleted. I think no `PootleCommand` should handle a TP with an obsolete directory; the `update_stores` command will resurrect the TP directory when it is added back.
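The fix in the diff above is a one-line queryset narrowing; the toy stand-in below (names are illustrative, not Pootle's real API) shows the pattern: filter with `.live()` before iterating so obsolete TPs never reach the per-TP handlers.

```python
class TPQuerySet:
    """Toy stand-in for Pootle's translationproject_set manager."""

    def __init__(self, tps):
        self._tps = tps

    def live(self):
        # Mirrors the .live() manager method: drop obsolete TPs up front.
        return TPQuerySet([tp for tp in self._tps if not tp["obsolete"]])

    def iterator(self):
        return iter(self._tps)


tps = [
    {"code": "uk/android_announcements_evernote", "obsolete": True},
    {"code": "de/android_announcements_evernote", "obsolete": False},
]
for tp in TPQuerySet(tps).live().iterator():
    print(tp["code"])  # only the live TP; no spurious cache-deletion logs
```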
Pylons__pyramid-3076
[ { "content": "from zope.deprecation import deprecated\nfrom zope.interface import providedBy\n\nfrom pyramid.interfaces import (\n IAuthenticationPolicy,\n IAuthorizationPolicy,\n ISecuredView,\n IView,\n IViewClassifier,\n )\n\nfrom pyramid.compat import map_\nfrom pyramid.threadlocal import get_current_registry\n\nEveryone = 'system.Everyone'\nAuthenticated = 'system.Authenticated'\nAllow = 'Allow'\nDeny = 'Deny'\n\n_marker = object()\n\nclass AllPermissionsList(object):\n \"\"\" Stand in 'permission list' to represent all permissions \"\"\"\n def __iter__(self):\n return ()\n def __contains__(self, other):\n return True\n def __eq__(self, other):\n return isinstance(other, self.__class__)\n\nALL_PERMISSIONS = AllPermissionsList()\nDENY_ALL = (Deny, Everyone, ALL_PERMISSIONS)\n\nNO_PERMISSION_REQUIRED = '__no_permission_required__'\n\ndef _get_registry(request):\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n return reg\n\ndef _get_authentication_policy(request):\n registry = _get_registry(request)\n return registry.queryUtility(IAuthenticationPolicy)\n\ndef has_permission(permission, context, request):\n \"\"\"\n A function that calls :meth:`pyramid.request.Request.has_permission`\n and returns its result.\n \n .. deprecated:: 1.5\n Use :meth:`pyramid.request.Request.has_permission` instead.\n\n .. versionchanged:: 1.5a3\n If context is None, then attempt to use the context attribute of self;\n if not set, then the AttributeError is propagated.\n \"\"\" \n return request.has_permission(permission, context)\n\ndeprecated(\n 'has_permission',\n 'As of Pyramid 1.5 the \"pyramid.security.has_permission\" API is now '\n 'deprecated. It will be removed in Pyramid 1.8. Use the '\n '\"has_permission\" method of the Pyramid request instead.'\n )\n\n\ndef authenticated_userid(request):\n \"\"\"\n A function that returns the value of the property\n :attr:`pyramid.request.Request.authenticated_userid`.\n \n .. deprecated:: 1.5\n Use :attr:`pyramid.request.Request.authenticated_userid` instead.\n \"\"\" \n return request.authenticated_userid\n\ndeprecated(\n 'authenticated_userid',\n 'As of Pyramid 1.5 the \"pyramid.security.authenticated_userid\" API is now '\n 'deprecated. It will be removed in Pyramid 1.8. Use the '\n '\"authenticated_userid\" attribute of the Pyramid request instead.'\n )\n\ndef unauthenticated_userid(request):\n \"\"\" \n A function that returns the value of the property\n :attr:`pyramid.request.Request.unauthenticated_userid`.\n \n .. deprecated:: 1.5\n Use :attr:`pyramid.request.Request.unauthenticated_userid` instead.\n \"\"\" \n return request.unauthenticated_userid\n\ndeprecated(\n 'unauthenticated_userid',\n 'As of Pyramid 1.5 the \"pyramid.security.unauthenticated_userid\" API is '\n 'now deprecated. It will be removed in Pyramid 1.8. Use the '\n '\"unauthenticated_userid\" attribute of the Pyramid request instead.'\n )\n\ndef effective_principals(request):\n \"\"\"\n A function that returns the value of the property\n :attr:`pyramid.request.Request.effective_principals`.\n \n .. deprecated:: 1.5\n Use :attr:`pyramid.request.Request.effective_principals` instead.\n \"\"\" \n return request.effective_principals\n\ndeprecated(\n 'effective_principals',\n 'As of Pyramid 1.5 the \"pyramid.security.effective_principals\" API is '\n 'now deprecated. It will be removed in Pyramid 1.8. 
Use the '\n '\"effective_principals\" attribute of the Pyramid request instead.'\n )\n\ndef remember(request, userid=_marker, **kw):\n \"\"\"\n Returns a sequence of header tuples (e.g. ``[('Set-Cookie', 'foo=abc')]``)\n on this request's response.\n These headers are suitable for 'remembering' a set of credentials\n implied by the data passed as ``userid`` and ``*kw`` using the\n current :term:`authentication policy`. Common usage might look\n like so within the body of a view function (``response`` is\n assumed to be a :term:`WebOb` -style :term:`response` object\n computed previously by the view code):\n\n .. code-block:: python\n\n from pyramid.security import remember\n headers = remember(request, 'chrism', password='123', max_age='86400')\n response = request.response\n response.headerlist.extend(headers)\n return response\n\n If no :term:`authentication policy` is in use, this function will\n always return an empty sequence. If used, the composition and\n meaning of ``**kw`` must be agreed upon by the calling code and\n the effective authentication policy.\n \n .. deprecated:: 1.6\n Renamed the ``principal`` argument to ``userid`` to clarify its\n purpose.\n \"\"\"\n if userid is _marker:\n principal = kw.pop('principal', _marker)\n if principal is _marker:\n raise TypeError(\n 'remember() missing 1 required positional argument: '\n '\\'userid\\'')\n else:\n deprecated(\n 'principal',\n 'The \"principal\" argument was deprecated in Pyramid 1.6. '\n 'It will be removed in Pyramid 1.9. Use the \"userid\" '\n 'argument instead.')\n userid = principal\n policy = _get_authentication_policy(request)\n if policy is None:\n return []\n return policy.remember(request, userid, **kw)\n\ndef forget(request):\n \"\"\"\n Return a sequence of header tuples (e.g. ``[('Set-Cookie',\n 'foo=abc')]``) suitable for 'forgetting' the set of credentials\n possessed by the currently authenticated user. A common usage\n might look like so within the body of a view function\n (``response`` is assumed to be an :term:`WebOb` -style\n :term:`response` object computed previously by the view code):\n\n .. code-block:: python\n\n from pyramid.security import forget\n headers = forget(request)\n response.headerlist.extend(headers)\n return response\n\n If no :term:`authentication policy` is in use, this function will\n always return an empty sequence.\n \"\"\" \n policy = _get_authentication_policy(request)\n if policy is None:\n return []\n return policy.forget(request)\n\ndef principals_allowed_by_permission(context, permission):\n \"\"\" Provided a ``context`` (a resource object), and a ``permission``\n (a string or unicode object), if a :term:`authorization policy` is\n in effect, return a sequence of :term:`principal` ids that possess\n the permission in the ``context``. If no authorization policy is\n in effect, this will return a sequence with the single value\n :mod:`pyramid.security.Everyone` (the special principal\n identifier representing all principals).\n\n .. 
note::\n\n even if an :term:`authorization policy` is in effect,\n some (exotic) authorization policies may not implement the\n required machinery for this function; those will cause a\n :exc:`NotImplementedError` exception to be raised when this\n function is invoked.\n \"\"\"\n reg = get_current_registry()\n policy = reg.queryUtility(IAuthorizationPolicy)\n if policy is None:\n return [Everyone]\n return policy.principals_allowed_by_permission(context, permission)\n\ndef view_execution_permitted(context, request, name=''):\n \"\"\" If the view specified by ``context`` and ``name`` is protected\n by a :term:`permission`, check the permission associated with the\n view using the effective authentication/authorization policies and\n the ``request``. Return a boolean result. If no\n :term:`authorization policy` is in effect, or if the view is not\n protected by a permission, return ``True``. If no view can view found,\n an exception will be raised.\n\n .. versionchanged:: 1.4a4\n An exception is raised if no view is found.\n\n \"\"\"\n reg = _get_registry(request)\n provides = [IViewClassifier] + map_(providedBy, (request, context))\n # XXX not sure what to do here about using _find_views or analogue;\n # for now let's just keep it as-is\n view = reg.adapters.lookup(provides, ISecuredView, name=name)\n if view is None:\n view = reg.adapters.lookup(provides, IView, name=name)\n if view is None:\n raise TypeError('No registered view satisfies the constraints. '\n 'It would not make sense to claim that this view '\n '\"is\" or \"is not\" permitted.')\n return Allowed(\n 'Allowed: view name %r in context %r (no permission defined)' %\n (name, context))\n return view.__permitted__(context, request)\n\n\nclass PermitsResult(int):\n def __new__(cls, s, *args):\n inst = int.__new__(cls, cls.boolval)\n inst.s = s\n inst.args = args\n return inst\n\n @property\n def msg(self):\n return self.s % self.args\n\n def __str__(self):\n return self.msg\n\n def __repr__(self):\n return '<%s instance at %s with msg %r>' % (self.__class__.__name__,\n id(self),\n self.msg)\n\nclass Denied(PermitsResult):\n \"\"\" An instance of ``Denied`` is returned when a security-related\n API or other :app:`Pyramid` code denies an action unrelated to\n an ACL check. It evaluates equal to all boolean false types. It\n has an attribute named ``msg`` describing the circumstances for\n the deny.\"\"\"\n boolval = 0\n\nclass Allowed(PermitsResult):\n \"\"\" An instance of ``Allowed`` is returned when a security-related\n API or other :app:`Pyramid` code allows an action unrelated to\n an ACL check. It evaluates equal to all boolean true types. 
It\n has an attribute named ``msg`` describing the circumstances for\n the allow.\"\"\"\n boolval = 1\n\nclass ACLPermitsResult(int):\n def __new__(cls, ace, acl, permission, principals, context):\n inst = int.__new__(cls, cls.boolval)\n inst.permission = permission\n inst.ace = ace\n inst.acl = acl\n inst.principals = principals\n inst.context = context\n return inst\n\n @property\n def msg(self):\n s = ('%s permission %r via ACE %r in ACL %r on context %r for '\n 'principals %r')\n return s % (self.__class__.__name__,\n self.permission,\n self.ace,\n self.acl,\n self.context,\n self.principals)\n\n def __str__(self):\n return self.msg\n\n def __repr__(self):\n return '<%s instance at %s with msg %r>' % (self.__class__.__name__,\n id(self),\n self.msg)\n\nclass ACLDenied(ACLPermitsResult):\n \"\"\" An instance of ``ACLDenied`` represents that a security check made\n explicitly against ACL was denied. It evaluates equal to all boolean\n false types. It also has the following attributes: ``acl``, ``ace``,\n ``permission``, ``principals``, and ``context``. These attributes\n indicate the security values involved in the request. Its __str__ method\n prints a summary of these attributes for debugging purposes. The same\n summary is available as the ``msg`` attribute.\"\"\"\n boolval = 0\n\nclass ACLAllowed(ACLPermitsResult):\n \"\"\" An instance of ``ACLAllowed`` represents that a security check made\n explicitly against ACL was allowed. It evaluates equal to all boolean\n true types. It also has the following attributes: ``acl``, ``ace``,\n ``permission``, ``principals``, and ``context``. These attributes\n indicate the security values involved in the request. Its __str__ method\n prints a summary of these attributes for debugging purposes. The same\n summary is available as the ``msg`` attribute.\"\"\"\n boolval = 1\n\nclass AuthenticationAPIMixin(object):\n\n def _get_authentication_policy(self):\n reg = _get_registry(self)\n return reg.queryUtility(IAuthenticationPolicy)\n\n @property\n def authenticated_userid(self):\n \"\"\" Return the userid of the currently authenticated user or\n ``None`` if there is no :term:`authentication policy` in effect or\n there is no currently authenticated user.\n\n .. versionadded:: 1.5\n \"\"\"\n policy = self._get_authentication_policy()\n if policy is None:\n return None\n return policy.authenticated_userid(self)\n\n @property\n def unauthenticated_userid(self):\n \"\"\" Return an object which represents the *claimed* (not verified) user\n id of the credentials present in the request. ``None`` if there is no\n :term:`authentication policy` in effect or there is no user data\n associated with the current request. This differs from\n :attr:`~pyramid.request.Request.authenticated_userid`, because the\n effective authentication policy will not ensure that a record\n associated with the userid exists in persistent storage.\n\n .. versionadded:: 1.5\n \"\"\"\n policy = self._get_authentication_policy()\n if policy is None:\n return None\n return policy.unauthenticated_userid(self)\n\n @property\n def effective_principals(self):\n \"\"\" Return the list of 'effective' :term:`principal` identifiers\n for the ``request``. If no :term:`authentication policy` is in effect,\n this will return a one-element list containing the\n :data:`pyramid.security.Everyone` principal.\n\n .. 
versionadded:: 1.5\n \"\"\"\n policy = self._get_authentication_policy()\n if policy is None:\n return [Everyone]\n return policy.effective_principals(self)\n\nclass AuthorizationAPIMixin(object):\n\n def has_permission(self, permission, context=None):\n \"\"\" Given a permission and an optional context, returns an instance of\n :data:`pyramid.security.Allowed` if the permission is granted to this\n request with the provided context, or the context already associated\n with the request. Otherwise, returns an instance of\n :data:`pyramid.security.Denied`. This method delegates to the current\n authentication and authorization policies. Returns\n :data:`pyramid.security.Allowed` unconditionally if no authentication\n policy has been registered for this request. If ``context`` is not\n supplied or is supplied as ``None``, the context used is the\n ``request.context`` attribute.\n\n :param permission: Does this request have the given permission?\n :type permission: unicode, str\n :param context: A resource object or ``None``\n :type context: object\n :returns: `pyramid.security.PermitsResult`\n\n .. versionadded:: 1.5\n\n \"\"\"\n if context is None:\n context = self.context\n reg = _get_registry(self)\n authn_policy = reg.queryUtility(IAuthenticationPolicy)\n if authn_policy is None:\n return Allowed('No authentication policy in use.')\n authz_policy = reg.queryUtility(IAuthorizationPolicy)\n if authz_policy is None:\n raise ValueError('Authentication policy registered without '\n 'authorization policy') # should never happen\n principals = authn_policy.effective_principals(self)\n return authz_policy.permits(context, principals, permission)\n", "path": "pyramid/security.py" } ]
[ { "content": "from zope.deprecation import deprecated\nfrom zope.interface import providedBy\n\nfrom pyramid.interfaces import (\n IAuthenticationPolicy,\n IAuthorizationPolicy,\n ISecuredView,\n IView,\n IViewClassifier,\n )\n\nfrom pyramid.compat import map_\nfrom pyramid.threadlocal import get_current_registry\n\nEveryone = 'system.Everyone'\nAuthenticated = 'system.Authenticated'\nAllow = 'Allow'\nDeny = 'Deny'\n\n_marker = object()\n\nclass AllPermissionsList(object):\n \"\"\" Stand in 'permission list' to represent all permissions \"\"\"\n\n def __iter__(self):\n return iter(())\n\n def __contains__(self, other):\n return True\n\n def __eq__(self, other):\n return isinstance(other, self.__class__)\n\nALL_PERMISSIONS = AllPermissionsList()\nDENY_ALL = (Deny, Everyone, ALL_PERMISSIONS)\n\nNO_PERMISSION_REQUIRED = '__no_permission_required__'\n\ndef _get_registry(request):\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n return reg\n\ndef _get_authentication_policy(request):\n registry = _get_registry(request)\n return registry.queryUtility(IAuthenticationPolicy)\n\ndef has_permission(permission, context, request):\n \"\"\"\n A function that calls :meth:`pyramid.request.Request.has_permission`\n and returns its result.\n \n .. deprecated:: 1.5\n Use :meth:`pyramid.request.Request.has_permission` instead.\n\n .. versionchanged:: 1.5a3\n If context is None, then attempt to use the context attribute of self;\n if not set, then the AttributeError is propagated.\n \"\"\" \n return request.has_permission(permission, context)\n\ndeprecated(\n 'has_permission',\n 'As of Pyramid 1.5 the \"pyramid.security.has_permission\" API is now '\n 'deprecated. It will be removed in Pyramid 1.8. Use the '\n '\"has_permission\" method of the Pyramid request instead.'\n )\n\n\ndef authenticated_userid(request):\n \"\"\"\n A function that returns the value of the property\n :attr:`pyramid.request.Request.authenticated_userid`.\n \n .. deprecated:: 1.5\n Use :attr:`pyramid.request.Request.authenticated_userid` instead.\n \"\"\" \n return request.authenticated_userid\n\ndeprecated(\n 'authenticated_userid',\n 'As of Pyramid 1.5 the \"pyramid.security.authenticated_userid\" API is now '\n 'deprecated. It will be removed in Pyramid 1.8. Use the '\n '\"authenticated_userid\" attribute of the Pyramid request instead.'\n )\n\ndef unauthenticated_userid(request):\n \"\"\" \n A function that returns the value of the property\n :attr:`pyramid.request.Request.unauthenticated_userid`.\n \n .. deprecated:: 1.5\n Use :attr:`pyramid.request.Request.unauthenticated_userid` instead.\n \"\"\" \n return request.unauthenticated_userid\n\ndeprecated(\n 'unauthenticated_userid',\n 'As of Pyramid 1.5 the \"pyramid.security.unauthenticated_userid\" API is '\n 'now deprecated. It will be removed in Pyramid 1.8. Use the '\n '\"unauthenticated_userid\" attribute of the Pyramid request instead.'\n )\n\ndef effective_principals(request):\n \"\"\"\n A function that returns the value of the property\n :attr:`pyramid.request.Request.effective_principals`.\n \n .. deprecated:: 1.5\n Use :attr:`pyramid.request.Request.effective_principals` instead.\n \"\"\" \n return request.effective_principals\n\ndeprecated(\n 'effective_principals',\n 'As of Pyramid 1.5 the \"pyramid.security.effective_principals\" API is '\n 'now deprecated. It will be removed in Pyramid 1.8. 
Use the '\n '\"effective_principals\" attribute of the Pyramid request instead.'\n )\n\ndef remember(request, userid=_marker, **kw):\n \"\"\"\n Returns a sequence of header tuples (e.g. ``[('Set-Cookie', 'foo=abc')]``)\n on this request's response.\n These headers are suitable for 'remembering' a set of credentials\n implied by the data passed as ``userid`` and ``*kw`` using the\n current :term:`authentication policy`. Common usage might look\n like so within the body of a view function (``response`` is\n assumed to be a :term:`WebOb` -style :term:`response` object\n computed previously by the view code):\n\n .. code-block:: python\n\n from pyramid.security import remember\n headers = remember(request, 'chrism', password='123', max_age='86400')\n response = request.response\n response.headerlist.extend(headers)\n return response\n\n If no :term:`authentication policy` is in use, this function will\n always return an empty sequence. If used, the composition and\n meaning of ``**kw`` must be agreed upon by the calling code and\n the effective authentication policy.\n \n .. deprecated:: 1.6\n Renamed the ``principal`` argument to ``userid`` to clarify its\n purpose.\n \"\"\"\n if userid is _marker:\n principal = kw.pop('principal', _marker)\n if principal is _marker:\n raise TypeError(\n 'remember() missing 1 required positional argument: '\n '\\'userid\\'')\n else:\n deprecated(\n 'principal',\n 'The \"principal\" argument was deprecated in Pyramid 1.6. '\n 'It will be removed in Pyramid 1.9. Use the \"userid\" '\n 'argument instead.')\n userid = principal\n policy = _get_authentication_policy(request)\n if policy is None:\n return []\n return policy.remember(request, userid, **kw)\n\ndef forget(request):\n \"\"\"\n Return a sequence of header tuples (e.g. ``[('Set-Cookie',\n 'foo=abc')]``) suitable for 'forgetting' the set of credentials\n possessed by the currently authenticated user. A common usage\n might look like so within the body of a view function\n (``response`` is assumed to be an :term:`WebOb` -style\n :term:`response` object computed previously by the view code):\n\n .. code-block:: python\n\n from pyramid.security import forget\n headers = forget(request)\n response.headerlist.extend(headers)\n return response\n\n If no :term:`authentication policy` is in use, this function will\n always return an empty sequence.\n \"\"\" \n policy = _get_authentication_policy(request)\n if policy is None:\n return []\n return policy.forget(request)\n\ndef principals_allowed_by_permission(context, permission):\n \"\"\" Provided a ``context`` (a resource object), and a ``permission``\n (a string or unicode object), if a :term:`authorization policy` is\n in effect, return a sequence of :term:`principal` ids that possess\n the permission in the ``context``. If no authorization policy is\n in effect, this will return a sequence with the single value\n :mod:`pyramid.security.Everyone` (the special principal\n identifier representing all principals).\n\n .. 
note::\n\n even if an :term:`authorization policy` is in effect,\n some (exotic) authorization policies may not implement the\n required machinery for this function; those will cause a\n :exc:`NotImplementedError` exception to be raised when this\n function is invoked.\n \"\"\"\n reg = get_current_registry()\n policy = reg.queryUtility(IAuthorizationPolicy)\n if policy is None:\n return [Everyone]\n return policy.principals_allowed_by_permission(context, permission)\n\ndef view_execution_permitted(context, request, name=''):\n \"\"\" If the view specified by ``context`` and ``name`` is protected\n by a :term:`permission`, check the permission associated with the\n view using the effective authentication/authorization policies and\n the ``request``. Return a boolean result. If no\n :term:`authorization policy` is in effect, or if the view is not\n protected by a permission, return ``True``. If no view can view found,\n an exception will be raised.\n\n .. versionchanged:: 1.4a4\n An exception is raised if no view is found.\n\n \"\"\"\n reg = _get_registry(request)\n provides = [IViewClassifier] + map_(providedBy, (request, context))\n # XXX not sure what to do here about using _find_views or analogue;\n # for now let's just keep it as-is\n view = reg.adapters.lookup(provides, ISecuredView, name=name)\n if view is None:\n view = reg.adapters.lookup(provides, IView, name=name)\n if view is None:\n raise TypeError('No registered view satisfies the constraints. '\n 'It would not make sense to claim that this view '\n '\"is\" or \"is not\" permitted.')\n return Allowed(\n 'Allowed: view name %r in context %r (no permission defined)' %\n (name, context))\n return view.__permitted__(context, request)\n\n\nclass PermitsResult(int):\n def __new__(cls, s, *args):\n inst = int.__new__(cls, cls.boolval)\n inst.s = s\n inst.args = args\n return inst\n\n @property\n def msg(self):\n return self.s % self.args\n\n def __str__(self):\n return self.msg\n\n def __repr__(self):\n return '<%s instance at %s with msg %r>' % (self.__class__.__name__,\n id(self),\n self.msg)\n\nclass Denied(PermitsResult):\n \"\"\" An instance of ``Denied`` is returned when a security-related\n API or other :app:`Pyramid` code denies an action unrelated to\n an ACL check. It evaluates equal to all boolean false types. It\n has an attribute named ``msg`` describing the circumstances for\n the deny.\"\"\"\n boolval = 0\n\nclass Allowed(PermitsResult):\n \"\"\" An instance of ``Allowed`` is returned when a security-related\n API or other :app:`Pyramid` code allows an action unrelated to\n an ACL check. It evaluates equal to all boolean true types. 
It\n has an attribute named ``msg`` describing the circumstances for\n the allow.\"\"\"\n boolval = 1\n\nclass ACLPermitsResult(int):\n def __new__(cls, ace, acl, permission, principals, context):\n inst = int.__new__(cls, cls.boolval)\n inst.permission = permission\n inst.ace = ace\n inst.acl = acl\n inst.principals = principals\n inst.context = context\n return inst\n\n @property\n def msg(self):\n s = ('%s permission %r via ACE %r in ACL %r on context %r for '\n 'principals %r')\n return s % (self.__class__.__name__,\n self.permission,\n self.ace,\n self.acl,\n self.context,\n self.principals)\n\n def __str__(self):\n return self.msg\n\n def __repr__(self):\n return '<%s instance at %s with msg %r>' % (self.__class__.__name__,\n id(self),\n self.msg)\n\nclass ACLDenied(ACLPermitsResult):\n \"\"\" An instance of ``ACLDenied`` represents that a security check made\n explicitly against ACL was denied. It evaluates equal to all boolean\n false types. It also has the following attributes: ``acl``, ``ace``,\n ``permission``, ``principals``, and ``context``. These attributes\n indicate the security values involved in the request. Its __str__ method\n prints a summary of these attributes for debugging purposes. The same\n summary is available as the ``msg`` attribute.\"\"\"\n boolval = 0\n\nclass ACLAllowed(ACLPermitsResult):\n \"\"\" An instance of ``ACLAllowed`` represents that a security check made\n explicitly against ACL was allowed. It evaluates equal to all boolean\n true types. It also has the following attributes: ``acl``, ``ace``,\n ``permission``, ``principals``, and ``context``. These attributes\n indicate the security values involved in the request. Its __str__ method\n prints a summary of these attributes for debugging purposes. The same\n summary is available as the ``msg`` attribute.\"\"\"\n boolval = 1\n\nclass AuthenticationAPIMixin(object):\n\n def _get_authentication_policy(self):\n reg = _get_registry(self)\n return reg.queryUtility(IAuthenticationPolicy)\n\n @property\n def authenticated_userid(self):\n \"\"\" Return the userid of the currently authenticated user or\n ``None`` if there is no :term:`authentication policy` in effect or\n there is no currently authenticated user.\n\n .. versionadded:: 1.5\n \"\"\"\n policy = self._get_authentication_policy()\n if policy is None:\n return None\n return policy.authenticated_userid(self)\n\n @property\n def unauthenticated_userid(self):\n \"\"\" Return an object which represents the *claimed* (not verified) user\n id of the credentials present in the request. ``None`` if there is no\n :term:`authentication policy` in effect or there is no user data\n associated with the current request. This differs from\n :attr:`~pyramid.request.Request.authenticated_userid`, because the\n effective authentication policy will not ensure that a record\n associated with the userid exists in persistent storage.\n\n .. versionadded:: 1.5\n \"\"\"\n policy = self._get_authentication_policy()\n if policy is None:\n return None\n return policy.unauthenticated_userid(self)\n\n @property\n def effective_principals(self):\n \"\"\" Return the list of 'effective' :term:`principal` identifiers\n for the ``request``. If no :term:`authentication policy` is in effect,\n this will return a one-element list containing the\n :data:`pyramid.security.Everyone` principal.\n\n .. 
versionadded:: 1.5\n \"\"\"\n policy = self._get_authentication_policy()\n if policy is None:\n return [Everyone]\n return policy.effective_principals(self)\n\nclass AuthorizationAPIMixin(object):\n\n def has_permission(self, permission, context=None):\n \"\"\" Given a permission and an optional context, returns an instance of\n :data:`pyramid.security.Allowed` if the permission is granted to this\n request with the provided context, or the context already associated\n with the request. Otherwise, returns an instance of\n :data:`pyramid.security.Denied`. This method delegates to the current\n authentication and authorization policies. Returns\n :data:`pyramid.security.Allowed` unconditionally if no authentication\n policy has been registered for this request. If ``context`` is not\n supplied or is supplied as ``None``, the context used is the\n ``request.context`` attribute.\n\n :param permission: Does this request have the given permission?\n :type permission: unicode, str\n :param context: A resource object or ``None``\n :type context: object\n :returns: `pyramid.security.PermitsResult`\n\n .. versionadded:: 1.5\n\n \"\"\"\n if context is None:\n context = self.context\n reg = _get_registry(self)\n authn_policy = reg.queryUtility(IAuthenticationPolicy)\n if authn_policy is None:\n return Allowed('No authentication policy in use.')\n authz_policy = reg.queryUtility(IAuthorizationPolicy)\n if authz_policy is None:\n raise ValueError('Authentication policy registered without '\n 'authorization policy') # should never happen\n principals = authn_policy.effective_principals(self)\n return authz_policy.permits(context, principals, permission)\n", "path": "pyramid/security.py" } ]
diff --git a/pyramid/security.py b/pyramid/security.py
index 82e6b73a91..035f09f775 100644
--- a/pyramid/security.py
+++ b/pyramid/security.py
@@ -21,10 +21,13 @@
 
 class AllPermissionsList(object):
     """ Stand in 'permission list' to represent all permissions """
+
     def __iter__(self):
-        return ()
+        return iter(())
+
     def __contains__(self, other):
         return True
+
     def __eq__(self, other):
         return isinstance(other, self.__class__)
 
diff --git a/pyramid/tests/test_security.py b/pyramid/tests/test_security.py
index 6d75ac8e3d..5561a05d71 100644
--- a/pyramid/tests/test_security.py
+++ b/pyramid/tests/test_security.py
@@ -16,12 +16,32 @@ def _getTargetClass(self):
     def _makeOne(self):
         return self._getTargetClass()()
 
-    def test_it(self):
+    def test_equality_w_self(self):
         thing = self._makeOne()
         self.assertTrue(thing.__eq__(thing))
-        self.assertEqual(thing.__iter__(), ())
+
+    def test_equality_w_other_instances_of_class(self):
+        thing = self._makeOne()
+        other = self._makeOne()
+        self.assertTrue(thing.__eq__(other))
+
+    def test_equality_miss(self):
+        thing = self._makeOne()
+        other = object()
+        self.assertFalse(thing.__eq__(other))
+
+    def test_contains_w_string(self):
+        thing = self._makeOne()
         self.assertTrue('anything' in thing)
 
+    def test_contains_w_object(self):
+        thing = self._makeOne()
+        self.assertTrue(object() in thing)
+
+    def test_iterable(self):
+        thing = self._makeOne()
+        self.assertEqual(list(thing), [])
+
     def test_singleton(self):
         from pyramid.security import ALL_PERMISSIONS
         self.assertEqual(ALL_PERMISSIONS.__class__, self._getTargetClass())
`AllPermissionsList.__iter__` returns tuple(). Should return iter(tuple())?

My understanding is that __iter__ should [return an iterator object](https://docs.python.org/3.5/library/stdtypes.html#container.__iter__). However, [`AllPermissionsList.__iter__`](https://github.com/Pylons/pyramid/blob/master/pyramid/security.py#L25) returns `()`.

Indeed, this raises a TypeError as expected:

```
In [1]: from pyramid.security import ALL_PERMISSIONS

In [2]: iter(ALL_PERMISSIONS)
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-22-f8a3d5a1d337> in <module>()
----> 1 iter(ALL_PERMISSIONS)

TypeError: iter() returned non-iterator of type 'tuple'
```

Lastly, I don't see where this method is used in Pyramid. Should this code (the `__iter__` method on `AllPermissionsList`) even exist?
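For illustration, a minimal standalone sketch of the fix the diff above applies (the assertions at the end are added here to show the repaired behavior and are not part of the patch): `__iter__` returns `iter(())`, an empty iterator, so the class satisfies the iterator protocol while `__contains__` still claims membership for everything.

```
class AllPermissionsList(object):
    """ Stand-in 'permission list' representing all permissions. """

    def __iter__(self):
        # __iter__ must return an iterator object; iter(()) is an empty
        # iterator, whereas the bare tuple () is not an iterator.
        return iter(())

    def __contains__(self, other):
        # Membership tests succeed for any value.
        return True

    def __eq__(self, other):
        return isinstance(other, self.__class__)


ALL_PERMISSIONS = AllPermissionsList()
assert 'anything' in ALL_PERMISSIONS   # __contains__ is unaffected
assert list(ALL_PERMISSIONS) == []     # no longer raises TypeError
```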
conan-io__conan-center-index-789
[ { "content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass BackwardCppConan(ConanFile):\n name = \"backward-cpp\"\n description = \"A beautiful stack trace pretty printer for C++\"\n homepage = \"https://github.com/bombela/backward-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n topics = (\"conan\", \"backward-cpp\", \"stack-trace\")\n license = \"MIT\"\n exports_sources = [ \"CMakeLists.txt\", \"patches/backward-cpp-*.patch\" ]\n generators = \"cmake\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"stack_walking\" : [\"unwind\", \"backtrace\"],\n \"stack_details\" : [\"dw\", \"bfd\", \"dwarf\", \"backtrace_symbol\"],\n \"shared\": [True, False],\n \"fPIC\": [True, False]\n }\n default_options = {\n \"stack_walking\": \"unwind\",\n \"stack_details\": \"dwarf\",\n \"shared\": True,\n \"fPIC\": True\n }\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def _has_stack_walking(self, type):\n return self.options.stack_walking == type\n\n def _has_stack_details(self, type):\n return self.options.stack_details == type\n \n def configure(self):\n if self.settings.os not in [\"Linux\", \"Macos\", \"Android\"]:\n raise ConanInvalidConfiguration(\"upstream backward-cpp v{0} is not \\\n supported in {1}.\".format(self.version, self.settings.os))\n # windows implementation only available in upstream master branch\n\n if self.settings.os == \"Macos\" and \\\n not self._has_stack_details(\"backtrace_symbol\"):\n raise ConanInvalidConfiguration(\"only stack_details=backtrace_symbol\"\n \" is supported on Macos\")\n \n def requirements(self):\n if self.settings.os in [\"Linux\", \"Android\"] and \\\n self._has_stack_details(\"dwarf\"):\n self.requires(\"libdwarf/20191104\")\n \n def system_requirements(self):\n required_package = None\n if self.settings.os == \"Linux\":\n if self._has_stack_details(\"dw\"):\n if tools.os_info.linux_distro in [\"ubuntu\", \"debian\"]:\n required_package = \"libdw-dev\"\n elif tools.os_info.linux_distro in [\"fedora\", \"centos\"]:\n required_package = \"elfutils-libs\"\n elif tools.os_info.linux_distro == \"opensuse\":\n required_package = \"libdw-devel\"\n elif tools.os_info.linux_distro == \"arch\":\n required_package = \"libelf\"\n\n if self._has_stack_details(\"bfd\"):\n if tools.os_info.linux_distro in [\"ubuntu\", \"debian\"]:\n required_package = \"binutils-dev\"\n elif tools.os_info.linux_distro in [\"fedora\", \"centos\", \"opensuse\"]:\n required_package = \"binutils-devel\"\n elif tools.os_info.linux_distro == \"arch\":\n required_package = \"binutils\"\n elif tools.os_info.is_freebsd:\n required_package = \"libbfd\"\n \n if required_package != None:\n installer = tools.SystemPackageTool()\n if not installer.installed(required_package):\n raise ConanInvalidConfiguration(\"backward-cpp requires {}.\".format(required_package))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions['STACK_WALKING_UNWIND'] = self._has_stack_walking(\"unwind\")\n cmake.definitions['STACK_WALKING_BACKTRACE'] = self._has_stack_walking(\"backtrace\")\n cmake.definitions['STACK_DETAILS_AUTO_DETECT'] = False\n cmake.definitions['STACK_DETAILS_BACKTRACE_SYMBOL'] = self._has_stack_details(\"backtrace_symbol\")\n 
cmake.definitions['STACK_DETAILS_DW'] = self._has_stack_details(\"dw\")\n cmake.definitions['STACK_DETAILS_BFD'] = self._has_stack_details(\"bfd\")\n cmake.definitions['STACK_DETAILS_DWARF'] = self._has_stack_details(\"dwarf\")\n cmake.definitions['BACKWARD_SHARED'] = self.options.shared\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n self.copy(pattern=\"LICENSE*\", dst=\"licenses\", src=self._source_subfolder)\n os.remove(os.path.join(self.package_folder, \"lib\", \"backward\", \"BackwardConfig.cmake\"))\n\n def package_info(self):\n self.cpp_info.names[\"cmake\"] = \"Backward\"\n self.cpp_info.names[\"cmake_find_package\"] = \"Backward\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Backward\"\n\n self.cpp_info.defines.append('BACKWARD_HAS_UNWIND={}'.format(int(self._has_stack_walking(\"unwind\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE={}'.format(int(self._has_stack_walking(\"backtrace\"))))\n \n self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE_SYMBOL={}'.format(int(self._has_stack_details(\"backtrace_symbol\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_DW={}'.format(int(self._has_stack_details(\"dw\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_BFD={}'.format(int(self._has_stack_details(\"bfd\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_DWARF={}'.format(int(self._has_stack_details(\"dwarf\"))))\n\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"dl\"])\n if self._has_stack_details(\"dw\"):\n self.cpp_info.system_libs.extend([\"dw\"]) \n if self._has_stack_details(\"bfd\"):\n self.cpp_info.system_libs.extend([\"bfd\"])\n\n\n \n", "path": "recipes/backward-cpp/all/conanfile.py" } ]
[ { "content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass BackwardCppConan(ConanFile):\n name = \"backward-cpp\"\n description = \"A beautiful stack trace pretty printer for C++\"\n homepage = \"https://github.com/bombela/backward-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n topics = (\"conan\", \"backward-cpp\", \"stack-trace\")\n license = \"MIT\"\n exports_sources = [ \"CMakeLists.txt\", \"patches/backward-cpp-*.patch\" ]\n generators = \"cmake\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"stack_walking\" : [\"unwind\", \"backtrace\"],\n \"stack_details\" : [\"dw\", \"bfd\", \"dwarf\", \"backtrace_symbol\"],\n \"shared\": [True, False],\n \"fPIC\": [True, False]\n }\n default_options = {\n \"stack_walking\": \"unwind\",\n \"stack_details\": \"dwarf\",\n \"shared\": True,\n \"fPIC\": True\n }\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def _has_stack_walking(self, type):\n return self.options.stack_walking == type\n\n def _has_stack_details(self, type):\n return self.options.stack_details == type\n \n def configure(self):\n if self.settings.os not in [\"Linux\", \"Macos\", \"Android\"]:\n raise ConanInvalidConfiguration(\"upstream backward-cpp v{0} is not \\\n supported in {1}.\".format(self.version, self.settings.os))\n # windows implementation only available in upstream master branch\n\n if self.settings.os == \"Macos\" and \\\n not self._has_stack_details(\"backtrace_symbol\"):\n raise ConanInvalidConfiguration(\"only stack_details=backtrace_symbol\"\n \" is supported on Macos\")\n \n def requirements(self):\n if self.settings.os in [\"Linux\", \"Android\"] and \\\n self._has_stack_details(\"dwarf\"):\n self.requires(\"libdwarf/20191104\")\n \n def system_requirements(self):\n required_package = None\n if self.settings.os == \"Linux\":\n if self._has_stack_details(\"dw\"):\n if tools.os_info.linux_distro in [\"ubuntu\", \"debian\"]:\n required_package = \"libdw-dev\"\n elif tools.os_info.linux_distro in [\"fedora\", \"centos\"]:\n required_package = \"elfutils-libs\"\n elif tools.os_info.linux_distro == \"opensuse\":\n required_package = \"libdw-devel\"\n elif tools.os_info.linux_distro == \"arch\":\n required_package = \"libelf\"\n\n if self._has_stack_details(\"bfd\"):\n if tools.os_info.linux_distro in [\"ubuntu\", \"debian\"]:\n required_package = \"binutils-dev\"\n elif tools.os_info.linux_distro in [\"fedora\", \"centos\", \"opensuse\"]:\n required_package = \"binutils-devel\"\n elif tools.os_info.linux_distro == \"arch\":\n required_package = \"binutils\"\n elif tools.os_info.is_freebsd:\n required_package = \"libbfd\"\n \n if required_package != None:\n installer = tools.SystemPackageTool()\n if not installer.installed(required_package):\n raise ConanInvalidConfiguration(\"backward-cpp requires {}.\".format(required_package))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions['STACK_WALKING_UNWIND'] = self._has_stack_walking(\"unwind\")\n cmake.definitions['STACK_WALKING_BACKTRACE'] = self._has_stack_walking(\"backtrace\")\n cmake.definitions['STACK_DETAILS_AUTO_DETECT'] = False\n cmake.definitions['STACK_DETAILS_BACKTRACE_SYMBOL'] = self._has_stack_details(\"backtrace_symbol\")\n 
cmake.definitions['STACK_DETAILS_DW'] = self._has_stack_details(\"dw\")\n cmake.definitions['STACK_DETAILS_BFD'] = self._has_stack_details(\"bfd\")\n cmake.definitions['STACK_DETAILS_DWARF'] = self._has_stack_details(\"dwarf\")\n cmake.definitions['BACKWARD_SHARED'] = self.options.shared\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n self.copy(pattern=\"LICENSE*\", dst=\"licenses\", src=self._source_subfolder)\n os.remove(os.path.join(self.package_folder, \"lib\", \"backward\", \"BackwardConfig.cmake\"))\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"Backward\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Backward\"\n\n self.cpp_info.defines.append('BACKWARD_HAS_UNWIND={}'.format(int(self._has_stack_walking(\"unwind\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE={}'.format(int(self._has_stack_walking(\"backtrace\"))))\n \n self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE_SYMBOL={}'.format(int(self._has_stack_details(\"backtrace_symbol\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_DW={}'.format(int(self._has_stack_details(\"dw\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_BFD={}'.format(int(self._has_stack_details(\"bfd\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_DWARF={}'.format(int(self._has_stack_details(\"dwarf\"))))\n\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"dl\"])\n if self._has_stack_details(\"dw\"):\n self.cpp_info.system_libs.extend([\"dw\"]) \n if self._has_stack_details(\"bfd\"):\n self.cpp_info.system_libs.extend([\"bfd\"])\n\n\n \n", "path": "recipes/backward-cpp/all/conanfile.py" } ]
diff --git a/recipes/backward-cpp/all/conanfile.py b/recipes/backward-cpp/all/conanfile.py
index 4fddde8f6a781..d96d1bc4063bc 100644
--- a/recipes/backward-cpp/all/conanfile.py
+++ b/recipes/backward-cpp/all/conanfile.py
@@ -111,7 +111,6 @@ def package(self):
         os.remove(os.path.join(self.package_folder, "lib", "backward", "BackwardConfig.cmake"))
 
     def package_info(self):
-        self.cpp_info.names["cmake"] = "Backward"
         self.cpp_info.names["cmake_find_package"] = "Backward"
         self.cpp_info.names["cmake_find_package_multi"] = "Backward"
 
[question] Consistency in package and generator names

After the discussion in https://github.com/conan-io/conan/issues/6269#issuecomment-570182130, most packages got normalized to use the format discussed in this comment (https://github.com/conan-io/conan-center-index/pull/690 and related).

If I understand the intention correctly, in CMake the package should always be added with `CONAN_PKG::{name}` (for example `openssl`) and conan will then expand the correct `Find*`-macro using `self.cpp_info.names["cmake_find_package"]` or `self.cpp_info.names["cmake_find_package_multi"]` (in this case `OpenSSL`).

Some recipes now use an additional `self.cpp_info.names['cmake']` (a cursory search found [libcurl](https://github.com/conan-io/conan-center-index/blob/ee20546/recipes/libcurl/all/conanfile.py#L374) and [backward-cpp](https://github.com/conan-io/conan-center-index/blob/ee20546/recipes/backward-cpp/all/conanfile.py#L114)). This leads to strange behavior:

- the `cmake` generator expects `CONAN_PKG::CURL` and will fail with `CONAN_PKG::libcurl`
- the `cmake_multi` generator works the opposite way, working with `CONAN_PKG::libcurl` and failing with uppercase `CURL`

In terms of consistency, I tend to say that the behavior of `cmake_multi` is the correct one, but either way, both CMake variants should at least behave the same way. I'm not sure if there are any side effects in removing the offending lines.

I didn't check if other generators have similar behavior for different packages. It might be a good idea to double-check all occurrences of superfluous or missing `cpp_info.names` ([tcl](https://github.com/conan-io/conan-center-index/blob/ee20546/recipes/tcl/8.6.10/conanfile.py#L198), for example, is missing the _multi_ entry).

Is there a specific reason to split `cmake` and `cmake_multi` in this case (maybe a question to move to the main conan repo)?
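As a sketch of the convention the merged fix settles on (this mirrors the `package_info` from the recipe above; the comments are editorial): the `cmake` entry is dropped, so the plain `cmake` and `cmake_multi` generators both fall back to the package name and expose `CONAN_PKG::backward-cpp`, while the find-package generators still produce the capitalized `Backward` name.

```
def package_info(self):
    # No self.cpp_info.names["cmake"] entry: the `cmake` and `cmake_multi`
    # generators then consistently expose the target as
    # CONAN_PKG::backward-cpp (the package name).
    # Only the find_package-style generators honor the custom name below.
    self.cpp_info.names["cmake_find_package"] = "Backward"
    self.cpp_info.names["cmake_find_package_multi"] = "Backward"
```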
openstates__openstates-scrapers-2041
[ { "content": "import os\nimport datetime\n\nfrom pupa.scrape import Scraper, Bill, VoteEvent\nfrom pupa.scrape.base import ScrapeError\n\nimport xlrd\nimport scrapelib\nimport lxml.html\nimport pytz\n\n\nclass OHBillScraper(Scraper):\n _tz = pytz.timezone('US/Eastern')\n\n def scrape(self, session=None, chambers=None):\n # Bills endpoint can sometimes take a very long time to load\n self.timeout = 300\n\n if not session:\n session = self.latest_session()\n self.info('no session, using %s', session)\n\n if int(session) < 128:\n raise AssertionError(\"No data for period {}\".format(session))\n\n elif int(session) < 131:\n # they changed their data format starting in 131st and added\n # an undocumented API\n yield from self.old_scrape(session)\n\n else:\n chamber_dict = {\"Senate\": \"upper\", \"House\": \"lower\",\n \"House of Representatives\": \"lower\",\n \"house\": \"lower\", \"senate\": \"upper\"}\n\n # so presumanbly not everything passes, but we haven't\n # seen anything not pass yet, so we'll need to wait\n # till it fails and get the right language in here\n vote_results = {\"approved\": True,\n \"passed\": True,\n \"adopted\": True,\n \"true\": True,\n \"false\": False,\n \"failed\": False,\n True: True,\n False: False}\n\n action_dict = {\"ref_ctte_100\": \"referral-committee\",\n \"intro_100\": \"introduction\",\n \"pass_300\": \"passage\",\n \"intro_110\": \"reading-1\",\n \"refer_210\": \"referral-committee\",\n \"crpt_301\": None,\n \"crpt_317\": None,\n \"concur_606\": \"passage\",\n \"pass_301\": \"passage\",\n \"refer_220\": \"referral-committee\",\n \"intro_102\": [\"introduction\", \"passage\"],\n \"intro_105\": [\"introduction\", \"passage\"],\n \"intro_ref_ctte_100\": \"referral-committee\",\n \"refer_209\": None,\n \"intro_108\": [\"introduction\", \"passage\"],\n \"intro_103\": [\"introduction\", \"passage\"],\n \"msg_reso_503\": \"passage\",\n \"intro_107\": [\"introduction\", \"passage\"],\n \"imm_consid_360\": \"passage\",\n \"refer_213\": None,\n \"adopt_reso_100\": \"passage\",\n \"msg_507\": \"amendment-passage\",\n \"confer_713\": None,\n \"concur_603\": None,\n \"confer_712\": None,\n \"msg_506\": \"amendment-failure\",\n \"receive_message_100\": \"passage\",\n \"motion_920\": None,\n \"concur_611\": None,\n \"confer_735\": None\n }\n\n base_url = \"http://search-prod.lis.state.oh.us\"\n first_page = base_url\n first_page += \"/solarapi/v1/general_assembly_{session}/\".format(session=session)\n legislators = self.get_legislator_ids(first_page)\n all_amendments = self.get_other_data_source(first_page, base_url, \"amendments\")\n all_fiscals = self.get_other_data_source(first_page, base_url, \"fiscals\")\n all_synopsis = self.get_other_data_source(first_page, base_url, \"synopsiss\")\n all_analysis = self.get_other_data_source(first_page, base_url, \"analysiss\")\n\n for row in self.get_bill_rows(session):\n number_link, ga, title, primary_sponsor, status = row.xpath('td')\n\n bill_id = number_link.text_content()\n title = title.text_content().strip()\n chamber = 'lower' if 'H' in bill_id else 'upper'\n classification = 'bill' if 'B' in bill_id else 'resolution'\n\n bill = Bill(bill_id, legislative_session=session, chamber=chamber,\n title=title, classification=classification)\n bill.add_source(number_link.xpath('a/@href')[0])\n\n # get bill from API\n bill_api_url = ('http://search-prod.lis.state.oh.us/solarapi/v1/'\n 'general_assembly_{}/{}/{}/'.format(\n session,\n 'bills' if 'B' in bill_id else 'resolutions',\n bill_id.lower().replace(' ', '')\n ))\n data 
= self.get(bill_api_url).json()\n\n # add title if no short title\n if not bill.title:\n bill.title = data['items'][0]['longtitle']\n bill.add_title(data['items'][0]['longtitle'], 'long title')\n\n # this stuff is version-specific\n for version in data['items']:\n version_name = version[\"version\"]\n version_link = base_url+version[\"pdfDownloadLink\"]\n bill.add_version_link(version_name, version_link, media_type='application/pdf')\n\n # we'll use latest bill_version for everything else\n bill_version = data['items'][0]\n bill.add_source(bill_api_url)\n\n # subjects\n for subj in bill_version[\"subjectindexes\"]:\n try:\n bill.add_subject(subj[\"primary\"])\n except KeyError:\n pass\n try:\n secondary_subj = subj[\"secondary\"]\n except KeyError:\n secondary_subj = \"\"\n if secondary_subj:\n bill.add_subject(secondary_subj)\n\n # sponsors\n sponsors = bill_version[\"sponsors\"]\n for sponsor in sponsors:\n sponsor_name = self.get_sponsor_name(sponsor)\n bill.add_sponsorship(\n sponsor_name,\n classification='primary',\n entity_type='person',\n primary=True\n )\n\n cosponsors = bill_version[\"cosponsors\"]\n for sponsor in cosponsors:\n sponsor_name = self.get_sponsor_name(sponsor)\n bill.add_sponsorship(\n sponsor_name,\n classification='cosponsor',\n entity_type='person',\n primary=False,\n )\n\n try:\n action_doc = self.get(base_url+bill_version[\"action\"][0][\"link\"])\n except scrapelib.HTTPError:\n pass\n else:\n\n actions = action_doc.json()\n for action in reversed(actions[\"items\"]):\n actor = chamber_dict[action[\"chamber\"]]\n action_desc = action[\"description\"]\n try:\n action_type = action_dict[action[\"actioncode\"]]\n except KeyError:\n self.warning(\"Unknown action {desc} with code {code}.\"\n \" Add it to the action_dict\"\n \".\".format(desc=action_desc,\n code=action[\"actioncode\"]))\n action_type = None\n\n date = self._tz.localize(datetime.datetime.strptime(\n action[\"datetime\"],\n \"%Y-%m-%dT%H:%M:%S\"))\n date = \"{:%Y-%m-%d}\".format(date)\n\n bill.add_action(action_desc,\n date, chamber=actor,\n classification=action_type)\n\n # attach documents gathered earlier\n self.add_document(all_amendments, bill_id, \"amendment\", bill, base_url)\n self.add_document(all_fiscals, bill_id, \"fiscal\", bill, base_url)\n self.add_document(all_synopsis, bill_id, \"synopsis\", bill, base_url)\n self.add_document(all_analysis, bill_id, \"analysis\", bill, base_url)\n\n # votes\n vote_url = base_url+bill_version[\"votes\"][0][\"link\"]\n vote_doc = self.get(vote_url)\n votes = vote_doc.json()\n yield from self.process_vote(votes, vote_url,\n base_url, bill, legislators,\n chamber_dict, vote_results)\n\n vote_url = base_url\n vote_url += bill_version[\"cmtevotes\"][0][\"link\"]\n try:\n vote_doc = self.get(vote_url)\n except scrapelib.HTTPError:\n self.warning(\"Vote page not \"\n \"loading; skipping: {}\".format(vote_url))\n continue\n votes = vote_doc.json()\n yield from self.process_vote(votes, vote_url,\n base_url, bill, legislators,\n chamber_dict, vote_results)\n\n # we have never seen a veto or a disapprove, but they seem important.\n # so we'll check and throw an error if we find one\n # life is fragile. so are our scrapers.\n if \"veto\" in bill_version:\n veto_url = base_url+bill_version[\"veto\"][0][\"link\"]\n veto_json = self.get(veto_url).json()\n if len(veto_json[\"items\"]) > 0:\n raise AssertionError(\"Whoa, a veto! 
We've never\"\n \" gotten one before.\"\n \" Go write some code to deal\"\n \" with it: {}\".format(veto_url))\n\n if \"disapprove\" in bill_version:\n disapprove_url = base_url+bill_version[\"disapprove\"][0][\"link\"]\n disapprove_json = self.get(disapprove_url).json()\n if len(disapprove_json[\"items\"]) > 0:\n raise AssertionError(\"Whoa, a disapprove! We've never\"\n \" gotten one before.\"\n \" Go write some code to deal \"\n \"with it: {}\".format(disapprove_url))\n\n yield bill\n\n def pages(self, base_url, first_page):\n page = self.get(first_page)\n page = page.json()\n yield page\n while \"nextLink\" in page:\n page = self.get(base_url+page[\"nextLink\"])\n page = page.json()\n yield page\n\n def get_bill_rows(self, session, start=1):\n # bill API endpoint times out so we're now getting this from the normal search\n bill_url = ('https://www.legislature.ohio.gov/legislation?pageSize=500&start={}&'\n 'sort=LegislationNumber&dir=asc&statusCode&generalAssemblies={}'.format(\n start, session)\n )\n doc = self.get(bill_url)\n doc = lxml.html.fromstring(doc.text)\n doc.make_links_absolute(bill_url)\n\n rows = doc.xpath('//tr')[1:]\n yield from rows\n if len(rows) == 500:\n yield from self.get_bill_rows(session, start+500)\n # if page is full, get next page - could use pagination info in\n # //div[id=\"searchResultsInfo\"] to improve this\n\n def get_other_data_source(self, first_page, base_url, source_name):\n # produces a dictionary from bill_id to a list of\n # one of the following:\n # amendments, analysis, fiscals, synopsis\n # could pull these by bill, but doing it in bulk\n # and then matching on our end will get us by with way fewer\n # api calls\n\n bill_dict = {}\n for page in self.pages(base_url, first_page+source_name):\n for item in page[\"items\"]:\n billno = item[\"billno\"]\n if billno not in bill_dict:\n bill_dict[billno] = []\n bill_dict[billno].append(item)\n\n return bill_dict\n\n def add_document(self, documents, bill_id, type_of_document, bill, base_url):\n try:\n documents = documents[bill_id]\n except KeyError:\n return\n\n leg_ver_types = {\"IN\": \"Introduction\",\n \"RS\": \"Reported: Senate\",\n \"PS\": \"Passed: Senate\",\n \"RH\": \"Reported: House\",\n \"PH\": \"Passed: House\",\n \"\": \"\",\n \"ICS\": \"\",\n \"IC\": \"\",\n \"RCS\": \"\",\n \"EN\": \"Enacted\",\n \"RCH\": \"Re-referred\",\n \"RRH\": \"\",\n \"PHC\": \"\",\n \"CR\": \"\"\n }\n\n for item in documents:\n if type_of_document == \"amendment\":\n name = item[\"amendnum\"] + \" \" + item[\"version\"]\n else:\n name = item[\"name\"] or type_of_document\n link = base_url+item[\"link\"]+\"?format=pdf\"\n try:\n self.head(link)\n except scrapelib.HTTPError:\n self.logger.warning(\"The link to doc {name}\"\n \" does not exist, skipping\".format(name=name))\n continue\n if \"legacyver\" in item:\n try:\n ver = leg_ver_types[item[\"legacyver\"]]\n except KeyError:\n self.logger.warning(\n \"New legacyver; check the type and add it to the \"\n \"leg_ver_types dictionary: {} ({})\".format(\n item[\"legacyver\"], item['link']))\n ver = \"\"\n if ver:\n name = name+\": \"+ver\n bill.add_document_link(name, link, media_type=\"application/pdf\")\n\n def get_legislator_ids(self, base_url):\n legislators = {}\n for chamber in [\"House\", \"Senate\"]:\n url = base_url+\"chamber/{chamber}/legislators?per_page=100\"\n doc = self.get(url.format(chamber=chamber))\n leg_json = doc.json()\n for leg in leg_json[\"items\"]:\n legislators[leg[\"med_id\"]] = leg[\"displayname\"]\n\n return legislators\n\n def 
get_sponsor_name(self, sponsor):\n return \" \".join([sponsor[\"firstname\"], sponsor[\"lastname\"]])\n\n def process_vote(self, votes, url, base_url, bill, legislators, chamber_dict, vote_results):\n for v in votes[\"items\"]:\n try:\n v[\"yeas\"]\n except KeyError:\n # sometimes the actual vote is buried a second layer deep\n v = self.get(base_url+v[\"link\"]).json()\n try:\n v[\"yeas\"]\n except KeyError:\n self.logger.warning(\"No vote info available, skipping\")\n continue\n\n try:\n chamber = chamber_dict[v[\"chamber\"]]\n except KeyError:\n chamber = \"lower\" if \"house\" in v[\"apn\"] else \"upper\"\n try:\n date = self._tz.localize(datetime.datetime.strptime(v[\"date\"], \"%m/%d/%y\"))\n date = \"{:%Y-%m-%d}\".format(date)\n except KeyError:\n try:\n date = self._tz.localize(datetime.datetime.strptime(v[\"occurred\"], \"%m/%d/%y\"))\n date = \"{:%Y-%m-%d}\".format(date)\n except KeyError:\n self.logger.warning(\"No date found for vote, skipping\")\n continue\n try:\n motion = v[\"action\"]\n except KeyError:\n motion = v[\"motiontype\"]\n\n # Sometimes Ohio's SOLAR will only return part of the JSON, so in that case skip\n if (not motion and isinstance(v['yeas'], str)\n and isinstance(v['nays'], str)):\n waringText = 'Malformed JSON found for vote (\"revno\" of {}); skipping'\n self.warning(waringText.format(v['revno']))\n continue\n\n result = v.get(\"results\") or v.get(\"passed\")\n if result is None:\n if len(v['yeas']) > len(v['nays']):\n result = \"passed\"\n else:\n result = \"failed\"\n\n passed = vote_results[result.lower()]\n if \"committee\" in v:\n vote = VoteEvent(chamber=chamber,\n start_date=date,\n motion_text=motion,\n result='pass' if passed else 'fail',\n # organization=v[\"committee\"],\n bill=bill,\n classification='passed'\n )\n else:\n vote = VoteEvent(chamber=chamber,\n start_date=date,\n motion_text=motion,\n result='pass' if passed else 'fail',\n classification='passed',\n bill=bill\n )\n vote.pupa_id = str(v['revno'])\n # the yea and nay counts are not displayed, but vote totals are\n # and passage status is.\n yes_count = 0\n no_count = 0\n absent_count = 0\n excused_count = 0\n for voter_id in v[\"yeas\"]:\n vote.yes(legislators[voter_id])\n yes_count += 1\n for voter_id in v[\"nays\"]:\n vote.no(legislators[voter_id])\n no_count += 1\n if \"absent\" in v:\n for voter_id in v[\"absent\"]:\n vote.vote('absent', legislators[voter_id])\n absent_count += 1\n if \"excused\" in v:\n for voter_id in v[\"excused\"]:\n vote.vote('excused', legislators[voter_id])\n excused_count += 1\n\n vote.set_count('yes', yes_count)\n vote.set_count('no', no_count)\n vote.set_count('absent', absent_count)\n vote.set_count('excused', excused_count)\n # check to see if there are any other things that look\n # like vote categories, throw a warning if so\n for key, val in v.items():\n if (type(val) == list and len(val) > 0 and\n key not in [\"yeas\", \"nays\", \"absent\", \"excused\"]):\n if val[0] in legislators:\n self.logger.warning(\"{k} looks like a vote type that's not being counted.\"\n \" Double check it?\".format(k=key))\n vote.add_source(url)\n\n yield vote\n\n def old_scrape(self, session=None):\n status_report_url = \"http://www.legislature.ohio.gov/legislation/status-reports\"\n\n # ssl verification off due Ohio not correctly implementing SSL\n if not session:\n session = self.latest_session()\n self.info('no session, using %s', session)\n\n doc = self.get(status_report_url, verify=False).text\n doc = lxml.html.fromstring(doc)\n 
doc.make_links_absolute(status_report_url)\n xpath = \"//div[contains(text(),'{}')]/following-sibling::table\"\n status_table = doc.xpath(xpath.format(session))[0]\n status_links = status_table.xpath(\".//a[contains(text(),'Excel')]/@href\")\n\n for url in status_links:\n\n try:\n fname, resp = self.urlretrieve(url)\n except scrapelib.HTTPError as report:\n self.logger.warning(\"Missing report {}\".format(report))\n continue\n\n sh = xlrd.open_workbook(fname).sheet_by_index(0)\n\n # once workbook is open, we can remove tempfile\n os.remove(fname)\n for rownum in range(1, sh.nrows):\n bill_id = sh.cell(rownum, 0).value\n\n bill_type = \"resolution\" if \"R\" in bill_id else \"bill\"\n chamber = \"lower\" if \"H\" in bill_id else \"upper\"\n\n bill_title = str(sh.cell(rownum, 3).value)\n\n bill = Bill(\n bill_id,\n legislative_session=session,\n chamber=chamber,\n title=bill_title,\n classification=bill_type\n )\n bill.add_source(url)\n bill.add_sponsor('primary', str(sh.cell(rownum, 1).value))\n\n # add cosponsor\n if sh.cell(rownum, 2).value:\n bill.add_sponsor('cosponsor',\n str(sh.cell(rownum, 2).value))\n\n actor = \"\"\n\n # Actions start column after bill title\n for colnum in range(4, sh.ncols - 1):\n action = str(sh.cell(0, colnum).value)\n cell = sh.cell(rownum, colnum)\n date = cell.value\n\n if len(action) != 0:\n if action.split()[0] == 'House':\n actor = \"lower\"\n elif action.split()[0] == 'Senate':\n actor = \"upper\"\n elif action.split()[-1] == 'Governor':\n actor = \"executive\"\n elif action.split()[0] == 'Gov.':\n actor = \"executive\"\n elif action.split()[-1] == 'Gov.':\n actor = \"executive\"\n\n if action in ('House Intro. Date', 'Senate Intro. Date'):\n atype = ['bill:introduced']\n action = action.replace('Intro. Date', 'Introduced')\n elif action == '3rd Consideration':\n atype = ['bill:reading:3', 'bill:passed']\n elif action == 'Sent to Gov.':\n atype = ['governor:received']\n elif action == 'Signed By Governor':\n atype = ['governor:signed']\n else:\n atype = ['other']\n\n if type(date) == float:\n date = str(xlrd.xldate_as_tuple(date, 0))\n date = datetime.datetime.strptime(\n date, \"(%Y, %m, %d, %H, %M, %S)\")\n date = self._tz.localize(date)\n date = \"{:%Y-%m-%d}\".format(date)\n bill.add_action(actor, action, date, type=atype)\n\n for idx, char in enumerate(bill_id):\n try:\n int(char)\n except ValueError:\n continue\n\n underscore_bill = bill_id[:idx]+\"_\"+bill_id[idx:]\n break\n\n yield from self.scrape_votes_old(bill, underscore_bill, session)\n self.scrape_versions_old(bill, underscore_bill, session)\n yield bill\n\n def scrape_versions_old(self, bill, billname, session):\n base_url = 'http://archives.legislature.state.oh.us/'\n\n if 'R' in billname:\n piece = '/res.cfm?ID=%s_%s' % (session, billname)\n else:\n piece = '/bills.cfm?ID=%s_%s' % (session, billname)\n\n def _get_html_or_pdf_version_old(url):\n doc = lxml.html.fromstring(url)\n name = doc.xpath('//font[@size=\"2\"]/a/text()')[0]\n html_links = doc.xpath('//a[text()=\"(.html format)\"]')\n pdf_links = doc.xpath('//a[text()=\"(.pdf format)\"]')\n if html_links:\n link = html_links[0].get('href')\n bill.add_version_link(name, base_url + link, on_duplicate='use_old',\n media_type='text/html')\n elif pdf_links:\n link = pdf_links[0].get('href')\n bill.add_version_link(name, base_url + link,\n media_type='application/pdf')\n\n html = self.get(base_url + piece).text\n # pass over missing bills - (unclear why this happens)\n if 'could not be found.' 
in html:\n self.warning('missing page: %s' % base_url + piece)\n return\n\n _get_html_or_pdf_version_old(html)\n doc = lxml.html.fromstring(html)\n for a in doc.xpath('//a[starts-with(@href, \"/bills.cfm\")]/@href'):\n if a != piece:\n _get_html_or_pdf_version_old(self.get(base_url + a).text)\n for a in doc.xpath('//a[starts-with(@href, \"/res.cfm\")]/@href'):\n if a != piece:\n _get_html_or_pdf_version_old(self.get(base_url + a).text)\n\n def scrape_votes_old(self, bill, billname, session):\n vote_url = ('http://archives.legislature.state.oh.us/bills.cfm?ID=' +\n session + '_' + billname)\n\n page = self.get(vote_url).text\n page = lxml.html.fromstring(page)\n\n for jlink in page.xpath(\"//a[contains(@href, 'JournalText')]\"):\n date = self._tz.localize(datetime.datetime.strptime(jlink.text,\n \"%m/%d/%Y\")).date()\n date = \"{:%Y-%m-%d}\".format(date)\n details = jlink.xpath(\"string(../../../td[2])\")\n\n chamber = details.split(\" - \")[0]\n if chamber == 'House':\n chamber = 'lower'\n elif chamber == 'Senate':\n chamber = 'upper'\n else:\n raise ScrapeError(\"Bad chamber: %s\" % chamber)\n\n motion = details.split(\" - \")[1].split(\"\\n\")[0].strip()\n\n vote_row = jlink.xpath(\"../../..\")[0].getnext()\n\n yea_div = vote_row.xpath(\n \"td/font/div[contains(@id, 'Yea')]\")[0]\n yeas = []\n for td in yea_div.xpath(\"table/tr/td\"):\n name = td.xpath(\"string()\")\n if name:\n yeas.append(name)\n\n no_div = vote_row.xpath(\n \"td/font/div[contains(@id, 'Nay')]\")[0]\n nays = []\n for td in no_div.xpath(\"table/tr/td\"):\n name = td.xpath(\"string()\")\n if name:\n nays.append(name)\n\n yes_count = len(yeas)\n no_count = len(nays)\n\n vote = VoteEvent(\n chamber=chamber,\n start_date=date,\n motion_text=motion,\n result='pass' if yes_count > no_count else 'fail',\n bill=bill,\n classification='passed'\n )\n\n for yes in yeas:\n vote.yes(yes)\n for no in nays:\n vote.no(no)\n\n vote.add_source(vote_url)\n\n yield vote\n", "path": "openstates/oh/bills.py" } ]
[ { "content": "import os\nimport datetime\n\nfrom pupa.scrape import Scraper, Bill, VoteEvent\nfrom pupa.scrape.base import ScrapeError\n\nimport xlrd\nimport scrapelib\nimport lxml.html\nimport pytz\n\n\nclass OHBillScraper(Scraper):\n _tz = pytz.timezone('US/Eastern')\n\n def scrape(self, session=None, chambers=None):\n # Bills endpoint can sometimes take a very long time to load\n self.timeout = 300\n\n if not session:\n session = self.latest_session()\n self.info('no session, using %s', session)\n\n if int(session) < 128:\n raise AssertionError(\"No data for period {}\".format(session))\n\n elif int(session) < 131:\n # they changed their data format starting in 131st and added\n # an undocumented API\n yield from self.old_scrape(session)\n\n else:\n chamber_dict = {\"Senate\": \"upper\", \"House\": \"lower\",\n \"House of Representatives\": \"lower\",\n \"house\": \"lower\", \"senate\": \"upper\"}\n\n # so presumanbly not everything passes, but we haven't\n # seen anything not pass yet, so we'll need to wait\n # till it fails and get the right language in here\n vote_results = {\"approved\": True,\n \"passed\": True,\n \"adopted\": True,\n \"true\": True,\n \"false\": False,\n \"failed\": False,\n True: True,\n False: False}\n\n action_dict = {\"ref_ctte_100\": \"referral-committee\",\n \"intro_100\": \"introduction\",\n \"pass_300\": \"passage\",\n \"intro_110\": \"reading-1\",\n \"refer_210\": \"referral-committee\",\n \"crpt_301\": None,\n \"crpt_317\": None,\n \"concur_606\": \"passage\",\n \"pass_301\": \"passage\",\n \"refer_220\": \"referral-committee\",\n \"intro_102\": [\"introduction\", \"passage\"],\n \"intro_105\": [\"introduction\", \"passage\"],\n \"intro_ref_ctte_100\": \"referral-committee\",\n \"refer_209\": None,\n \"intro_108\": [\"introduction\", \"passage\"],\n \"intro_103\": [\"introduction\", \"passage\"],\n \"msg_reso_503\": \"passage\",\n \"intro_107\": [\"introduction\", \"passage\"],\n \"imm_consid_360\": \"passage\",\n \"refer_213\": None,\n \"adopt_reso_100\": \"passage\",\n \"msg_507\": \"amendment-passage\",\n \"confer_713\": None,\n \"concur_603\": None,\n \"confer_712\": None,\n \"msg_506\": \"amendment-failure\",\n \"receive_message_100\": \"passage\",\n \"motion_920\": None,\n \"concur_611\": None,\n \"confer_735\": None\n }\n\n base_url = \"http://search-prod.lis.state.oh.us\"\n first_page = base_url\n first_page += \"/solarapi/v1/general_assembly_{session}/\".format(session=session)\n legislators = self.get_legislator_ids(first_page)\n all_amendments = self.get_other_data_source(first_page, base_url, \"amendments\")\n all_fiscals = self.get_other_data_source(first_page, base_url, \"fiscals\")\n all_synopsis = self.get_other_data_source(first_page, base_url, \"synopsiss\")\n all_analysis = self.get_other_data_source(first_page, base_url, \"analysiss\")\n\n for row in self.get_bill_rows(session):\n number_link, ga, title, primary_sponsor, status = row.xpath('td')\n\n bill_id = number_link.text_content()\n title = title.text_content().strip()\n chamber = 'lower' if 'H' in bill_id else 'upper'\n classification = 'bill' if 'B' in bill_id else 'resolution'\n\n bill = Bill(bill_id, legislative_session=session, chamber=chamber,\n title=title, classification=classification)\n bill.add_source(number_link.xpath('a/@href')[0])\n\n # get bill from API\n bill_api_url = ('http://search-prod.lis.state.oh.us/solarapi/v1/'\n 'general_assembly_{}/{}/{}/'.format(\n session,\n 'bills' if 'B' in bill_id else 'resolutions',\n bill_id.lower().replace(' ', '')\n ))\n data 
= self.get(bill_api_url).json()\n\n # add title if no short title\n if not bill.title:\n bill.title = data['items'][0]['longtitle']\n bill.add_title(data['items'][0]['longtitle'], 'long title')\n\n # this stuff is version-specific\n for version in data['items']:\n version_name = version[\"version\"]\n version_link = base_url+version[\"pdfDownloadLink\"]\n bill.add_version_link(version_name, version_link, media_type='application/pdf')\n\n # we'll use latest bill_version for everything else\n bill_version = data['items'][0]\n bill.add_source(bill_api_url)\n\n # subjects\n for subj in bill_version[\"subjectindexes\"]:\n try:\n bill.add_subject(subj[\"primary\"])\n except KeyError:\n pass\n try:\n secondary_subj = subj[\"secondary\"]\n except KeyError:\n secondary_subj = \"\"\n if secondary_subj:\n bill.add_subject(secondary_subj)\n\n # sponsors\n sponsors = bill_version[\"sponsors\"]\n for sponsor in sponsors:\n sponsor_name = self.get_sponsor_name(sponsor)\n bill.add_sponsorship(\n sponsor_name,\n classification='primary',\n entity_type='person',\n primary=True\n )\n\n cosponsors = bill_version[\"cosponsors\"]\n for sponsor in cosponsors:\n sponsor_name = self.get_sponsor_name(sponsor)\n bill.add_sponsorship(\n sponsor_name,\n classification='cosponsor',\n entity_type='person',\n primary=False,\n )\n\n try:\n action_doc = self.get(base_url+bill_version[\"action\"][0][\"link\"])\n except scrapelib.HTTPError:\n pass\n else:\n\n actions = action_doc.json()\n for action in reversed(actions[\"items\"]):\n actor = chamber_dict[action[\"chamber\"]]\n action_desc = action[\"description\"]\n try:\n action_type = action_dict[action[\"actioncode\"]]\n except KeyError:\n self.warning(\"Unknown action {desc} with code {code}.\"\n \" Add it to the action_dict\"\n \".\".format(desc=action_desc,\n code=action[\"actioncode\"]))\n action_type = None\n\n date = self._tz.localize(datetime.datetime.strptime(\n action[\"datetime\"],\n \"%Y-%m-%dT%H:%M:%S\"))\n date = \"{:%Y-%m-%d}\".format(date)\n\n bill.add_action(action_desc,\n date, chamber=actor,\n classification=action_type)\n\n # attach documents gathered earlier\n self.add_document(all_amendments, bill_id, \"amendment\", bill, base_url)\n self.add_document(all_fiscals, bill_id, \"fiscal\", bill, base_url)\n self.add_document(all_synopsis, bill_id, \"synopsis\", bill, base_url)\n self.add_document(all_analysis, bill_id, \"analysis\", bill, base_url)\n\n # votes\n vote_url = base_url+bill_version[\"votes\"][0][\"link\"]\n vote_doc = self.get(vote_url)\n votes = vote_doc.json()\n yield from self.process_vote(votes, vote_url,\n base_url, bill, legislators,\n chamber_dict, vote_results)\n\n vote_url = base_url\n vote_url += bill_version[\"cmtevotes\"][0][\"link\"]\n try:\n vote_doc = self.get(vote_url)\n except scrapelib.HTTPError:\n self.warning(\"Vote page not \"\n \"loading; skipping: {}\".format(vote_url))\n continue\n votes = vote_doc.json()\n yield from self.process_vote(votes, vote_url,\n base_url, bill, legislators,\n chamber_dict, vote_results)\n\n # we have never seen a veto or a disapprove, but they seem important.\n # so we'll check and throw an error if we find one\n # life is fragile. so are our scrapers.\n if \"veto\" in bill_version:\n veto_url = base_url+bill_version[\"veto\"][0][\"link\"]\n veto_json = self.get(veto_url).json()\n if len(veto_json[\"items\"]) > 0:\n raise AssertionError(\"Whoa, a veto! 
We've never\"\n \" gotten one before.\"\n \" Go write some code to deal\"\n \" with it: {}\".format(veto_url))\n\n if \"disapprove\" in bill_version:\n disapprove_url = base_url+bill_version[\"disapprove\"][0][\"link\"]\n disapprove_json = self.get(disapprove_url).json()\n if len(disapprove_json[\"items\"]) > 0:\n raise AssertionError(\"Whoa, a disapprove! We've never\"\n \" gotten one before.\"\n \" Go write some code to deal \"\n \"with it: {}\".format(disapprove_url))\n\n yield bill\n\n def pages(self, base_url, first_page):\n page = self.get(first_page)\n page = page.json()\n yield page\n while \"nextLink\" in page:\n page = self.get(base_url+page[\"nextLink\"])\n page = page.json()\n yield page\n\n def get_bill_rows(self, session, start=1):\n # bill API endpoint times out so we're now getting this from the normal search\n bill_url = ('https://www.legislature.ohio.gov/legislation?pageSize=500&start={}&'\n 'sort=LegislationNumber&dir=asc&statusCode&generalAssemblies={}'.format(\n start, session)\n )\n doc = self.get(bill_url)\n doc = lxml.html.fromstring(doc.text)\n doc.make_links_absolute(bill_url)\n\n rows = doc.xpath('//tr')[1:]\n yield from rows\n if len(rows) == 500:\n yield from self.get_bill_rows(session, start+500)\n # if page is full, get next page - could use pagination info in\n # //div[id=\"searchResultsInfo\"] to improve this\n\n def get_other_data_source(self, first_page, base_url, source_name):\n # produces a dictionary from bill_id to a list of\n # one of the following:\n # amendments, analysis, fiscals, synopsis\n # could pull these by bill, but doing it in bulk\n # and then matching on our end will get us by with way fewer\n # api calls\n\n bill_dict = {}\n for page in self.pages(base_url, first_page+source_name):\n for item in page[\"items\"]:\n billno = item[\"billno\"]\n if billno not in bill_dict:\n bill_dict[billno] = []\n bill_dict[billno].append(item)\n\n return bill_dict\n\n def add_document(self, documents, bill_id, type_of_document, bill, base_url):\n try:\n documents = documents[bill_id]\n except KeyError:\n return\n\n leg_ver_types = {\"IN\": \"Introduction\",\n \"RS\": \"Reported: Senate\",\n \"PS\": \"Passed: Senate\",\n \"RH\": \"Reported: House\",\n \"PH\": \"Passed: House\",\n \"\": \"\",\n \"ICS\": \"\",\n \"IC\": \"\",\n \"RCS\": \"\",\n \"EN\": \"Enacted\",\n \"RCH\": \"Re-referred\",\n \"RRH\": \"\",\n \"PHC\": \"\",\n \"CR\": \"\"\n }\n\n for item in documents:\n if type_of_document == \"amendment\":\n name = item[\"amendnum\"] + \" \" + item[\"version\"]\n else:\n name = item[\"name\"] or type_of_document\n link = base_url+item[\"link\"]+\"?format=pdf\"\n try:\n self.head(link)\n except scrapelib.HTTPError:\n self.logger.warning(\"The link to doc {name}\"\n \" does not exist, skipping\".format(name=name))\n continue\n if \"legacyver\" in item:\n try:\n ver = leg_ver_types[item[\"legacyver\"]]\n except KeyError:\n self.logger.warning(\n \"New legacyver; check the type and add it to the \"\n \"leg_ver_types dictionary: {} ({})\".format(\n item[\"legacyver\"], item['link']))\n ver = \"\"\n if ver:\n name = name+\": \"+ver\n bill.add_document_link(name, link, media_type=\"application/pdf\")\n\n def get_legislator_ids(self, base_url):\n legislators = {}\n for chamber in [\"House\", \"Senate\"]:\n url = base_url+\"chamber/{chamber}/legislators?per_page=100\"\n doc = self.get(url.format(chamber=chamber))\n leg_json = doc.json()\n for leg in leg_json[\"items\"]:\n if leg[\"med_id\"]:\n legislators[int(leg[\"med_id\"])] = leg[\"displayname\"]\n return 
legislators\n\n def get_sponsor_name(self, sponsor):\n return \" \".join([sponsor[\"firstname\"], sponsor[\"lastname\"]])\n\n def process_vote(self, votes, url, base_url, bill, legislators, chamber_dict, vote_results):\n for v in votes[\"items\"]:\n try:\n v[\"yeas\"]\n except KeyError:\n # sometimes the actual vote is buried a second layer deep\n v = self.get(base_url+v[\"link\"]).json()\n try:\n v[\"yeas\"]\n except KeyError:\n self.logger.warning(\"No vote info available, skipping\")\n continue\n\n try:\n chamber = chamber_dict[v[\"chamber\"]]\n except KeyError:\n chamber = \"lower\" if \"house\" in v[\"apn\"] else \"upper\"\n try:\n date = self._tz.localize(datetime.datetime.strptime(v[\"date\"], \"%m/%d/%y\"))\n date = \"{:%Y-%m-%d}\".format(date)\n except KeyError:\n try:\n date = self._tz.localize(datetime.datetime.strptime(v[\"occurred\"], \"%m/%d/%y\"))\n date = \"{:%Y-%m-%d}\".format(date)\n except KeyError:\n self.logger.warning(\"No date found for vote, skipping\")\n continue\n try:\n motion = v[\"action\"]\n except KeyError:\n motion = v[\"motiontype\"]\n\n # Sometimes Ohio's SOLAR will only return part of the JSON, so in that case skip\n if (not motion and isinstance(v['yeas'], str)\n and isinstance(v['nays'], str)):\n waringText = 'Malformed JSON found for vote (\"revno\" of {}); skipping'\n self.warning(waringText.format(v['revno']))\n continue\n\n result = v.get(\"results\") or v.get(\"passed\")\n if result is None:\n if len(v['yeas']) > len(v['nays']):\n result = \"passed\"\n else:\n result = \"failed\"\n\n passed = vote_results[result.lower()]\n if \"committee\" in v:\n vote = VoteEvent(chamber=chamber,\n start_date=date,\n motion_text=motion,\n result='pass' if passed else 'fail',\n # organization=v[\"committee\"],\n bill=bill,\n classification='passed'\n )\n else:\n vote = VoteEvent(chamber=chamber,\n start_date=date,\n motion_text=motion,\n result='pass' if passed else 'fail',\n classification='passed',\n bill=bill\n )\n vote.pupa_id = str(v['revno'])\n # the yea and nay counts are not displayed, but vote totals are\n # and passage status is.\n yes_count = 0\n no_count = 0\n absent_count = 0\n excused_count = 0\n for voter_id in v[\"yeas\"]:\n vote.yes(legislators[voter_id])\n yes_count += 1\n for voter_id in v[\"nays\"]:\n vote.no(legislators[voter_id])\n no_count += 1\n if \"absent\" in v:\n for voter_id in v[\"absent\"]:\n vote.vote('absent', legislators[voter_id])\n absent_count += 1\n if \"excused\" in v:\n for voter_id in v[\"excused\"]:\n vote.vote('excused', legislators[voter_id])\n excused_count += 1\n\n vote.set_count('yes', yes_count)\n vote.set_count('no', no_count)\n vote.set_count('absent', absent_count)\n vote.set_count('excused', excused_count)\n # check to see if there are any other things that look\n # like vote categories, throw a warning if so\n for key, val in v.items():\n if (type(val) == list and len(val) > 0 and\n key not in [\"yeas\", \"nays\", \"absent\", \"excused\"]):\n if val[0] in legislators:\n self.logger.warning(\"{k} looks like a vote type that's not being counted.\"\n \" Double check it?\".format(k=key))\n vote.add_source(url)\n\n yield vote\n\n def old_scrape(self, session=None):\n status_report_url = \"http://www.legislature.ohio.gov/legislation/status-reports\"\n\n # ssl verification off due Ohio not correctly implementing SSL\n if not session:\n session = self.latest_session()\n self.info('no session, using %s', session)\n\n doc = self.get(status_report_url, verify=False).text\n doc = lxml.html.fromstring(doc)\n 
doc.make_links_absolute(status_report_url)\n xpath = \"//div[contains(text(),'{}')]/following-sibling::table\"\n status_table = doc.xpath(xpath.format(session))[0]\n status_links = status_table.xpath(\".//a[contains(text(),'Excel')]/@href\")\n\n for url in status_links:\n\n try:\n fname, resp = self.urlretrieve(url)\n except scrapelib.HTTPError as report:\n self.logger.warning(\"Missing report {}\".format(report))\n continue\n\n sh = xlrd.open_workbook(fname).sheet_by_index(0)\n\n # once workbook is open, we can remove tempfile\n os.remove(fname)\n for rownum in range(1, sh.nrows):\n bill_id = sh.cell(rownum, 0).value\n\n bill_type = \"resolution\" if \"R\" in bill_id else \"bill\"\n chamber = \"lower\" if \"H\" in bill_id else \"upper\"\n\n bill_title = str(sh.cell(rownum, 3).value)\n\n bill = Bill(\n bill_id,\n legislative_session=session,\n chamber=chamber,\n title=bill_title,\n classification=bill_type\n )\n bill.add_source(url)\n bill.add_sponsor('primary', str(sh.cell(rownum, 1).value))\n\n # add cosponsor\n if sh.cell(rownum, 2).value:\n bill.add_sponsor('cosponsor',\n str(sh.cell(rownum, 2).value))\n\n actor = \"\"\n\n # Actions start column after bill title\n for colnum in range(4, sh.ncols - 1):\n action = str(sh.cell(0, colnum).value)\n cell = sh.cell(rownum, colnum)\n date = cell.value\n\n if len(action) != 0:\n if action.split()[0] == 'House':\n actor = \"lower\"\n elif action.split()[0] == 'Senate':\n actor = \"upper\"\n elif action.split()[-1] == 'Governor':\n actor = \"executive\"\n elif action.split()[0] == 'Gov.':\n actor = \"executive\"\n elif action.split()[-1] == 'Gov.':\n actor = \"executive\"\n\n if action in ('House Intro. Date', 'Senate Intro. Date'):\n atype = ['bill:introduced']\n action = action.replace('Intro. Date', 'Introduced')\n elif action == '3rd Consideration':\n atype = ['bill:reading:3', 'bill:passed']\n elif action == 'Sent to Gov.':\n atype = ['governor:received']\n elif action == 'Signed By Governor':\n atype = ['governor:signed']\n else:\n atype = ['other']\n\n if type(date) == float:\n date = str(xlrd.xldate_as_tuple(date, 0))\n date = datetime.datetime.strptime(\n date, \"(%Y, %m, %d, %H, %M, %S)\")\n date = self._tz.localize(date)\n date = \"{:%Y-%m-%d}\".format(date)\n bill.add_action(actor, action, date, type=atype)\n\n for idx, char in enumerate(bill_id):\n try:\n int(char)\n except ValueError:\n continue\n\n underscore_bill = bill_id[:idx]+\"_\"+bill_id[idx:]\n break\n\n yield from self.scrape_votes_old(bill, underscore_bill, session)\n self.scrape_versions_old(bill, underscore_bill, session)\n yield bill\n\n def scrape_versions_old(self, bill, billname, session):\n base_url = 'http://archives.legislature.state.oh.us/'\n\n if 'R' in billname:\n piece = '/res.cfm?ID=%s_%s' % (session, billname)\n else:\n piece = '/bills.cfm?ID=%s_%s' % (session, billname)\n\n def _get_html_or_pdf_version_old(url):\n doc = lxml.html.fromstring(url)\n name = doc.xpath('//font[@size=\"2\"]/a/text()')[0]\n html_links = doc.xpath('//a[text()=\"(.html format)\"]')\n pdf_links = doc.xpath('//a[text()=\"(.pdf format)\"]')\n if html_links:\n link = html_links[0].get('href')\n bill.add_version_link(name, base_url + link, on_duplicate='use_old',\n media_type='text/html')\n elif pdf_links:\n link = pdf_links[0].get('href')\n bill.add_version_link(name, base_url + link,\n media_type='application/pdf')\n\n html = self.get(base_url + piece).text\n # pass over missing bills - (unclear why this happens)\n if 'could not be found.' 
in html:\n self.warning('missing page: %s' % base_url + piece)\n return\n\n _get_html_or_pdf_version_old(html)\n doc = lxml.html.fromstring(html)\n for a in doc.xpath('//a[starts-with(@href, \"/bills.cfm\")]/@href'):\n if a != piece:\n _get_html_or_pdf_version_old(self.get(base_url + a).text)\n for a in doc.xpath('//a[starts-with(@href, \"/res.cfm\")]/@href'):\n if a != piece:\n _get_html_or_pdf_version_old(self.get(base_url + a).text)\n\n def scrape_votes_old(self, bill, billname, session):\n vote_url = ('http://archives.legislature.state.oh.us/bills.cfm?ID=' +\n session + '_' + billname)\n\n page = self.get(vote_url).text\n page = lxml.html.fromstring(page)\n\n for jlink in page.xpath(\"//a[contains(@href, 'JournalText')]\"):\n date = self._tz.localize(datetime.datetime.strptime(jlink.text,\n \"%m/%d/%Y\")).date()\n date = \"{:%Y-%m-%d}\".format(date)\n details = jlink.xpath(\"string(../../../td[2])\")\n\n chamber = details.split(\" - \")[0]\n if chamber == 'House':\n chamber = 'lower'\n elif chamber == 'Senate':\n chamber = 'upper'\n else:\n raise ScrapeError(\"Bad chamber: %s\" % chamber)\n\n motion = details.split(\" - \")[1].split(\"\\n\")[0].strip()\n\n vote_row = jlink.xpath(\"../../..\")[0].getnext()\n\n yea_div = vote_row.xpath(\n \"td/font/div[contains(@id, 'Yea')]\")[0]\n yeas = []\n for td in yea_div.xpath(\"table/tr/td\"):\n name = td.xpath(\"string()\")\n if name:\n yeas.append(name)\n\n no_div = vote_row.xpath(\n \"td/font/div[contains(@id, 'Nay')]\")[0]\n nays = []\n for td in no_div.xpath(\"table/tr/td\"):\n name = td.xpath(\"string()\")\n if name:\n nays.append(name)\n\n yes_count = len(yeas)\n no_count = len(nays)\n\n vote = VoteEvent(\n chamber=chamber,\n start_date=date,\n motion_text=motion,\n result='pass' if yes_count > no_count else 'fail',\n bill=bill,\n classification='passed'\n )\n\n for yes in yeas:\n vote.yes(yes)\n for no in nays:\n vote.no(no)\n\n vote.add_source(vote_url)\n\n yield vote\n", "path": "openstates/oh/bills.py" } ]
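The scraper above pages through the SOLAR API with a simple cursor pattern: `pages()` keeps requesting `base_url + page["nextLink"]` until a page arrives without a `nextLink` key. A self-contained sketch of that loop follows; the `get_json` callable and the canned payloads are stand-ins for `self.get(...).json()` and the real API, not actual endpoints.

```
def pages(get_json, base_url, first_page):
    # Follow "nextLink" cursors until a page no longer carries one,
    # mirroring OHBillScraper.pages.
    page = get_json(first_page)
    yield page
    while "nextLink" in page:
        page = get_json(base_url + page["nextLink"])
        yield page


# Canned responses standing in for the SOLAR API (illustrative only).
canned = {
    "http://api/first": {"items": [1, 2], "nextLink": "/second"},
    "http://api/second": {"items": [3]},
}
result = list(pages(canned.__getitem__, "http://api", "http://api/first"))
assert [p["items"] for p in result] == [[1, 2], [3]]
```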
diff --git a/openstates/oh/bills.py b/openstates/oh/bills.py index e99c7a31b1..723725af5a 100644 --- a/openstates/oh/bills.py +++ b/openstates/oh/bills.py @@ -333,8 +333,8 @@ def get_legislator_ids(self, base_url): doc = self.get(url.format(chamber=chamber)) leg_json = doc.json() for leg in leg_json["items"]: - legislators[leg["med_id"]] = leg["displayname"] - + if leg["med_id"]: + legislators[int(leg["med_id"])] = leg["displayname"] return legislators def get_sponsor_name(self, sponsor):
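In the hunk above, the fix is a guard plus a cast: legislators whose `med_id` is missing or empty are skipped, and ids are normalized to `int` so that later integer lookups such as `legislators[voter_id]` can hit. A minimal sketch of the repaired mapping; the names and ids below are made up for illustration, not taken from the live API.

```
def build_legislator_map(items):
    # Mirror of the patched loop in get_legislator_ids: skip falsy ids,
    # store keys as ints.
    legislators = {}
    for leg in items:
        if leg["med_id"]:
            legislators[int(leg["med_id"])] = leg["displayname"]
    return legislators


sample = [
    {"med_id": 1605, "displayname": "Jane Roe"},     # already an int
    {"med_id": "1610", "displayname": "John Doe"},   # arrives as a string
    {"med_id": None, "displayname": "Vacant Seat"},  # no id at all
]
assert build_legislator_map(sample) == {1605: "Jane Roe", 1610: "John Doe"}
```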
OH failing since at least 2017-12-23

OH has been failing since 2017-12-23

Based on automated runs it appears that OH has not run successfully in 2 days (2017-12-23).

```
23:01:27 INFO pupa: save post 85 as post_7fa5619a-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 86 as post_7fa562c6-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 88 as post_7fa56550-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 87 as post_7fa563fc-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 89 as post_7fa56690-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 90 as post_7fa567d0-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 91 as post_7fa56906-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 92 as post_7fa56a32-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 93 as post_7fa56bb8-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 94 as post_7fa56d02-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 95 as post_7fa56e38-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 97 as post_7fa570c2-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 96 as post_7fa56f8c-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 98 as post_7fa57202-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save post 99 as post_7fa57338-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save organization Democratic as organization_7fafd56c-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO pupa: save organization Republican as organization_7fafbd70-e867-11e7-97ee-0242ac110002.json
23:01:27 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/chamber/House/legislators?per_page=100
23:01:27 INFO pupa: no session, using 132
23:01:57 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/chamber/Senate/legislators?per_page=100
23:02:06 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/amendments
23:02:07 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/fiscals
23:02:09 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/synopsiss
23:02:09 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/analysiss
23:02:11 INFO scrapelib: GET - https://www.legislature.ohio.gov/legislation?pageSize=500&start=1&sort=LegislationNumber&dir=asc&statusCode&generalAssemblies=132
23:02:25 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/resolutions/hjr1/
23:02:26 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/resolutions/hjr1/actions
23:02:27 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/resolutions/hjr1/votes
no pupa_settings on path, using defaults
oh (scrape, import)
bills: {}
people: {}
Traceback (most recent call last):
  File "/opt/openstates/venv-pupa//bin/pupa", line 11, in <module>
    load_entry_point('pupa', 'console_scripts', 'pupa')()
  File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py", line 67, in main
    subcommands[args.subcommand].handle(args, other)
  File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 260, in handle
    return self.do_handle(args, other, juris)
  File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 305, in do_handle
    report['scrape'] = self.do_scrape(juris, args, scrapers)
  File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 173, in do_scrape
    report[scraper_name] = scraper.do_scrape(**scrape_args)
  File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 111, in do_scrape
    for obj in self.scrape(**kwargs) or []:
  File "/opt/openstates/openstates/openstates/oh/bills.py", line 200, in scrape
    chamber_dict, vote_results)
  File "/opt/openstates/openstates/openstates/oh/bills.py", line 415, in process_vote
    vote.yes(legislators[voter_id])
KeyError: 1605
```

Visit http://bobsled.openstates.org for more info.
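The `KeyError: 1605` above fires at `vote.yes(legislators[voter_id])`: voter id 1605 never made it into the map, which is what the patched `get_legislator_ids` repairs at the source. Purely as an illustration of a second line of defense (not part of the merged fix), the lookup site itself could degrade to a warning; `DummyVote` below is a made-up stand-in for pupa's `VoteEvent`.

```
def record_yes_votes(vote, legislators, voter_ids, warn=print):
    # Hypothetical defensive variant of the failing loop in process_vote:
    # unknown voter ids are reported instead of raising KeyError.
    for voter_id in voter_ids:
        name = legislators.get(voter_id)
        if name is None:
            warn("unknown voter id {}; skipping".format(voter_id))
            continue
        vote.yes(name)


class DummyVote:
    # Minimal stand-in for pupa.scrape.VoteEvent, for the demo only.
    def __init__(self):
        self.yeas = []

    def yes(self, name):
        self.yeas.append(name)


v = DummyVote()
record_yes_votes(v, {1605: "Jane Roe"}, [1605, 9999])
assert v.yeas == ["Jane Roe"]  # id 9999 is warned about, not fatal
```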
aio-libs__aiohttp-1431
[ { "content": "import asyncio\nimport base64\nimport binascii\nimport io\nimport json\nimport mimetypes\nimport os\nimport re\nimport sys\nimport uuid\nimport warnings\nimport zlib\nfrom collections import Mapping, Sequence, deque\nfrom pathlib import Path\nfrom urllib.parse import parse_qsl, quote, unquote, urlencode\n\nfrom multidict import CIMultiDict\n\nfrom .hdrs import (CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LENGTH,\n CONTENT_TRANSFER_ENCODING, CONTENT_TYPE)\nfrom .helpers import parse_mimetype\nfrom .protocol import HttpParser\n\n__all__ = ('MultipartReader', 'MultipartWriter',\n 'BodyPartReader', 'BodyPartWriter',\n 'BadContentDispositionHeader', 'BadContentDispositionParam',\n 'parse_content_disposition', 'content_disposition_filename')\n\n\nCHAR = set(chr(i) for i in range(0, 128))\nCTL = set(chr(i) for i in range(0, 32)) | {chr(127), }\nSEPARATORS = {'(', ')', '<', '>', '@', ',', ';', ':', '\\\\', '\"', '/', '[', ']',\n '?', '=', '{', '}', ' ', chr(9)}\nTOKEN = CHAR ^ CTL ^ SEPARATORS\n\nPY_35 = sys.version_info >= (3, 5)\nPY_352 = sys.version_info >= (3, 5, 2)\n\n\nclass BadContentDispositionHeader(RuntimeWarning):\n pass\n\n\nclass BadContentDispositionParam(RuntimeWarning):\n pass\n\n\ndef parse_content_disposition(header):\n def is_token(string):\n return string and TOKEN >= set(string)\n\n def is_quoted(string):\n return string[0] == string[-1] == '\"'\n\n def is_rfc5987(string):\n return is_token(string) and string.count(\"'\") == 2\n\n def is_extended_param(string):\n return string.endswith('*')\n\n def is_continuous_param(string):\n pos = string.find('*') + 1\n if not pos:\n return False\n substring = string[pos:-1] if string.endswith('*') else string[pos:]\n return substring.isdigit()\n\n def unescape(text, *, chars=''.join(map(re.escape, CHAR))):\n return re.sub('\\\\\\\\([{}])'.format(chars), '\\\\1', text)\n\n if not header:\n return None, {}\n\n disptype, *parts = header.split(';')\n if not is_token(disptype):\n warnings.warn(BadContentDispositionHeader(header))\n return None, {}\n\n params = {}\n for item in parts:\n if '=' not in item:\n warnings.warn(BadContentDispositionHeader(header))\n return None, {}\n\n key, value = item.split('=', 1)\n key = key.lower().strip()\n value = value.lstrip()\n\n if key in params:\n warnings.warn(BadContentDispositionHeader(header))\n return None, {}\n\n if not is_token(key):\n warnings.warn(BadContentDispositionParam(item))\n continue\n\n elif is_continuous_param(key):\n if is_quoted(value):\n value = unescape(value[1:-1])\n elif not is_token(value):\n warnings.warn(BadContentDispositionParam(item))\n continue\n\n elif is_extended_param(key):\n if is_rfc5987(value):\n encoding, _, value = value.split(\"'\", 2)\n encoding = encoding or 'utf-8'\n else:\n warnings.warn(BadContentDispositionParam(item))\n continue\n\n try:\n value = unquote(value, encoding, 'strict')\n except UnicodeDecodeError: # pragma: nocover\n warnings.warn(BadContentDispositionParam(item))\n continue\n\n else:\n if is_quoted(value):\n value = unescape(value[1:-1].lstrip('\\\\/'))\n elif not is_token(value):\n warnings.warn(BadContentDispositionHeader(header))\n return None, {}\n\n params[key] = value\n\n return disptype.lower(), params\n\n\ndef content_disposition_filename(params):\n if not params:\n return None\n elif 'filename*' in params:\n return params['filename*']\n elif 'filename' in params:\n return params['filename']\n else:\n parts = []\n fnparams = sorted((key, value)\n for key, value in params.items()\n if key.startswith('filename*'))\n for 
num, (key, value) in enumerate(fnparams):\n _, tail = key.split('*', 1)\n if tail.endswith('*'):\n tail = tail[:-1]\n if tail == str(num):\n parts.append(value)\n else:\n break\n if not parts:\n return None\n value = ''.join(parts)\n if \"'\" in value:\n encoding, _, value = value.split(\"'\", 2)\n encoding = encoding or 'utf-8'\n return unquote(value, encoding, 'strict')\n return value\n\n\nclass MultipartResponseWrapper(object):\n \"\"\"Wrapper around the :class:`MultipartBodyReader` to take care about\n underlying connection and close it when it needs in.\"\"\"\n\n def __init__(self, resp, stream):\n self.resp = resp\n self.stream = stream\n\n if PY_35:\n def __aiter__(self):\n return self\n\n if not PY_352: # pragma: no cover\n __aiter__ = asyncio.coroutine(__aiter__)\n\n @asyncio.coroutine\n def __anext__(self):\n part = yield from self.next()\n if part is None:\n raise StopAsyncIteration # NOQA\n return part\n\n def at_eof(self):\n \"\"\"Returns ``True`` when all response data had been read.\n\n :rtype: bool\n \"\"\"\n return self.resp.content.at_eof()\n\n @asyncio.coroutine\n def next(self):\n \"\"\"Emits next multipart reader object.\"\"\"\n item = yield from self.stream.next()\n if self.stream.at_eof():\n yield from self.release()\n return item\n\n @asyncio.coroutine\n def release(self):\n \"\"\"Releases the connection gracefully, reading all the content\n to the void.\"\"\"\n yield from self.resp.release()\n\n\nclass BodyPartReader(object):\n \"\"\"Multipart reader for single body part.\"\"\"\n\n chunk_size = 8192\n\n def __init__(self, boundary, headers, content):\n self.headers = headers\n self._boundary = boundary\n self._content = content\n self._at_eof = False\n length = self.headers.get(CONTENT_LENGTH, None)\n self._length = int(length) if length is not None else None\n self._read_bytes = 0\n self._unread = deque()\n self._prev_chunk = None\n self._content_eof = 0\n\n if PY_35:\n def __aiter__(self):\n return self\n\n if not PY_352: # pragma: no cover\n __aiter__ = asyncio.coroutine(__aiter__)\n\n @asyncio.coroutine\n def __anext__(self):\n part = yield from self.next()\n if part is None:\n raise StopAsyncIteration # NOQA\n return part\n\n @asyncio.coroutine\n def next(self):\n item = yield from self.read()\n if not item:\n return None\n return item\n\n @asyncio.coroutine\n def read(self, *, decode=False):\n \"\"\"Reads body part data.\n\n :param bool decode: Decodes data following by encoding\n method from `Content-Encoding` header. 
If it missed\n data remains untouched\n\n :rtype: bytearray\n \"\"\"\n if self._at_eof:\n return b''\n data = bytearray()\n if self._length is None:\n while not self._at_eof:\n data.extend((yield from self.readline()))\n else:\n while not self._at_eof:\n data.extend((yield from self.read_chunk(self.chunk_size)))\n if decode:\n return self.decode(data)\n return data\n\n @asyncio.coroutine\n def read_chunk(self, size=chunk_size):\n \"\"\"Reads body part content chunk of the specified size.\n\n :param int size: chunk size\n\n :rtype: bytearray\n \"\"\"\n if self._at_eof:\n return b''\n if self._length:\n chunk = yield from self._read_chunk_from_length(size)\n else:\n chunk = yield from self._read_chunk_from_stream(size)\n\n self._read_bytes += len(chunk)\n if self._read_bytes == self._length:\n self._at_eof = True\n if self._at_eof:\n assert b'\\r\\n' == (yield from self._content.readline()), \\\n 'reader did not read all the data or it is malformed'\n return chunk\n\n @asyncio.coroutine\n def _read_chunk_from_length(self, size):\n \"\"\"Reads body part content chunk of the specified size.\n The body part must has `Content-Length` header with proper value.\n\n :param int size: chunk size\n\n :rtype: bytearray\n \"\"\"\n assert self._length is not None, \\\n 'Content-Length required for chunked read'\n chunk_size = min(size, self._length - self._read_bytes)\n chunk = yield from self._content.read(chunk_size)\n return chunk\n\n @asyncio.coroutine\n def _read_chunk_from_stream(self, size):\n \"\"\"Reads content chunk of body part with unknown length.\n The `Content-Length` header for body part is not necessary.\n\n :param int size: chunk size\n\n :rtype: bytearray\n \"\"\"\n assert size >= len(self._boundary) + 2, \\\n 'Chunk size must be greater or equal than boundary length + 2'\n first_chunk = self._prev_chunk is None\n if first_chunk:\n self._prev_chunk = yield from self._content.read(size)\n\n chunk = yield from self._content.read(size)\n self._content_eof += int(self._content.at_eof())\n assert self._content_eof < 3, \"Reading after EOF\"\n window = self._prev_chunk + chunk\n sub = b'\\r\\n' + self._boundary\n if first_chunk:\n idx = window.find(sub)\n else:\n idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))\n if idx >= 0:\n # pushing boundary back to content\n self._content.unread_data(window[idx:])\n if size > idx:\n self._prev_chunk = self._prev_chunk[:idx]\n chunk = window[len(self._prev_chunk):idx]\n if not chunk:\n self._at_eof = True\n if 0 < len(chunk) < len(sub) and not self._content_eof:\n self._prev_chunk += chunk\n self._at_eof = False\n return b''\n result = self._prev_chunk\n self._prev_chunk = chunk\n return result\n\n @asyncio.coroutine\n def readline(self):\n \"\"\"Reads body part by line by line.\n\n :rtype: bytearray\n \"\"\"\n if self._at_eof:\n return b''\n\n if self._unread:\n line = self._unread.popleft()\n else:\n line = yield from self._content.readline()\n\n if line.startswith(self._boundary):\n # the very last boundary may not come with \\r\\n,\n # so set single rules for everyone\n sline = line.rstrip(b'\\r\\n')\n boundary = self._boundary\n last_boundary = self._boundary + b'--'\n # ensure that we read exactly the boundary, not something alike\n if sline == boundary or sline == last_boundary:\n self._at_eof = True\n self._unread.append(line)\n return b''\n else:\n next_line = yield from self._content.readline()\n if next_line.startswith(self._boundary):\n line = line[:-2] # strip CRLF but only once\n self._unread.append(next_line)\n\n return 
line\n\n @asyncio.coroutine\n def release(self):\n \"\"\"Like :meth:`read`, but reads all the data to the void.\n\n :rtype: None\n \"\"\"\n if self._at_eof:\n return\n if self._length is None:\n while not self._at_eof:\n yield from self.readline()\n else:\n while not self._at_eof:\n yield from self.read_chunk(self.chunk_size)\n\n @asyncio.coroutine\n def text(self, *, encoding=None):\n \"\"\"Like :meth:`read`, but assumes that body part contains text data.\n\n :param str encoding: Custom text encoding. Overrides specified\n in charset param of `Content-Type` header\n\n :rtype: str\n \"\"\"\n data = yield from self.read(decode=True)\n encoding = encoding or self.get_charset(default='latin1')\n return data.decode(encoding)\n\n @asyncio.coroutine\n def json(self, *, encoding=None):\n \"\"\"Like :meth:`read`, but assumes that body parts contains JSON data.\n\n :param str encoding: Custom JSON encoding. Overrides specified\n in charset param of `Content-Type` header\n \"\"\"\n data = yield from self.read(decode=True)\n if not data:\n return None\n encoding = encoding or self.get_charset(default='utf-8')\n return json.loads(data.decode(encoding))\n\n @asyncio.coroutine\n def form(self, *, encoding=None):\n \"\"\"Like :meth:`read`, but assumes that body parts contains form\n urlencoded data.\n\n :param str encoding: Custom form encoding. Overrides specified\n in charset param of `Content-Type` header\n \"\"\"\n data = yield from self.read(decode=True)\n if not data:\n return None\n encoding = encoding or self.get_charset(default='utf-8')\n return parse_qsl(data.rstrip().decode(encoding), encoding=encoding)\n\n def at_eof(self):\n \"\"\"Returns ``True`` if the boundary was reached or\n ``False`` otherwise.\n\n :rtype: bool\n \"\"\"\n return self._at_eof\n\n def decode(self, data):\n \"\"\"Decodes data according the specified `Content-Encoding`\n or `Content-Transfer-Encoding` headers value.\n\n Supports ``gzip``, ``deflate`` and ``identity`` encodings for\n `Content-Encoding` header.\n\n Supports ``base64``, ``quoted-printable``, ``binary`` encodings for\n `Content-Transfer-Encoding` header.\n\n :param bytearray data: Data to decode.\n\n :raises: :exc:`RuntimeError` - if encoding is unknown.\n\n :rtype: bytes\n \"\"\"\n if CONTENT_TRANSFER_ENCODING in self.headers:\n data = self._decode_content_transfer(data)\n if CONTENT_ENCODING in self.headers:\n return self._decode_content(data)\n return data\n\n def _decode_content(self, data):\n encoding = self.headers[CONTENT_ENCODING].lower()\n\n if encoding == 'deflate':\n return zlib.decompress(data, -zlib.MAX_WBITS)\n elif encoding == 'gzip':\n return zlib.decompress(data, 16 + zlib.MAX_WBITS)\n elif encoding == 'identity':\n return data\n else:\n raise RuntimeError('unknown content encoding: {}'.format(encoding))\n\n def _decode_content_transfer(self, data):\n encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()\n\n if encoding == 'base64':\n return base64.b64decode(data)\n elif encoding == 'quoted-printable':\n return binascii.a2b_qp(data)\n elif encoding == 'binary':\n return data\n else:\n raise RuntimeError('unknown content transfer encoding: {}'\n ''.format(encoding))\n\n def get_charset(self, default=None):\n \"\"\"Returns charset parameter from ``Content-Type`` header or default.\n \"\"\"\n ctype = self.headers.get(CONTENT_TYPE, '')\n *_, params = parse_mimetype(ctype)\n return params.get('charset', default)\n\n @property\n def filename(self):\n \"\"\"Returns filename specified in Content-Disposition header or ``None``\n if missed or 
header is malformed.\"\"\"\n _, params = parse_content_disposition(\n self.headers.get(CONTENT_DISPOSITION))\n return content_disposition_filename(params)\n\n\nclass MultipartReader(object):\n \"\"\"Multipart body reader.\"\"\"\n\n #: Response wrapper, used when multipart readers constructs from response.\n response_wrapper_cls = MultipartResponseWrapper\n #: Multipart reader class, used to handle multipart/* body parts.\n #: None points to type(self)\n multipart_reader_cls = None\n #: Body part reader class for non multipart/* content types.\n part_reader_cls = BodyPartReader\n\n def __init__(self, headers, content):\n self.headers = headers\n self._boundary = ('--' + self._get_boundary()).encode()\n self._content = content\n self._last_part = None\n self._at_eof = False\n self._at_bof = True\n self._unread = []\n\n if PY_35:\n def __aiter__(self):\n return self\n\n if not PY_352: # pragma: no cover\n __aiter__ = asyncio.coroutine(__aiter__)\n\n @asyncio.coroutine\n def __anext__(self):\n part = yield from self.next()\n if part is None:\n raise StopAsyncIteration # NOQA\n return part\n\n @classmethod\n def from_response(cls, response):\n \"\"\"Constructs reader instance from HTTP response.\n\n :param response: :class:`~aiohttp.client.ClientResponse` instance\n \"\"\"\n obj = cls.response_wrapper_cls(response, cls(response.headers,\n response.content))\n return obj\n\n def at_eof(self):\n \"\"\"Returns ``True`` if the final boundary was reached or\n ``False`` otherwise.\n\n :rtype: bool\n \"\"\"\n return self._at_eof\n\n @asyncio.coroutine\n def next(self):\n \"\"\"Emits the next multipart body part.\"\"\"\n # So, if we're at BOF, we need to skip till the boundary.\n if self._at_eof:\n return\n yield from self._maybe_release_last_part()\n if self._at_bof:\n yield from self._read_until_first_boundary()\n self._at_bof = False\n else:\n yield from self._read_boundary()\n if self._at_eof: # we just read the last boundary, nothing to do there\n return\n self._last_part = yield from self.fetch_next_part()\n return self._last_part\n\n @asyncio.coroutine\n def release(self):\n \"\"\"Reads all the body parts to the void till the final boundary.\"\"\"\n while not self._at_eof:\n item = yield from self.next()\n if item is None:\n break\n yield from item.release()\n\n @asyncio.coroutine\n def fetch_next_part(self):\n \"\"\"Returns the next body part reader.\"\"\"\n headers = yield from self._read_headers()\n return self._get_part_reader(headers)\n\n def _get_part_reader(self, headers):\n \"\"\"Dispatches the response by the `Content-Type` header, returning\n suitable reader instance.\n\n :param dict headers: Response headers\n \"\"\"\n ctype = headers.get(CONTENT_TYPE, '')\n mtype, *_ = parse_mimetype(ctype)\n if mtype == 'multipart':\n if self.multipart_reader_cls is None:\n return type(self)(headers, self._content)\n return self.multipart_reader_cls(headers, self._content)\n else:\n return self.part_reader_cls(self._boundary, headers, self._content)\n\n def _get_boundary(self):\n mtype, *_, params = parse_mimetype(self.headers[CONTENT_TYPE])\n\n assert mtype == 'multipart', 'multipart/* content type expected'\n\n if 'boundary' not in params:\n raise ValueError('boundary missed for Content-Type: %s'\n % self.headers[CONTENT_TYPE])\n\n boundary = params['boundary']\n if len(boundary) > 70:\n raise ValueError('boundary %r is too long (70 chars max)'\n % boundary)\n\n return boundary\n\n @asyncio.coroutine\n def _readline(self):\n if self._unread:\n return self._unread.pop()\n return (yield from 
self._content.readline())\n\n @asyncio.coroutine\n def _read_until_first_boundary(self):\n while True:\n chunk = yield from self._readline()\n if chunk == b'':\n raise ValueError(\"Could not find starting boundary %r\"\n % (self._boundary))\n chunk = chunk.rstrip()\n if chunk == self._boundary:\n return\n elif chunk == self._boundary + b'--':\n self._at_eof = True\n return\n\n @asyncio.coroutine\n def _read_boundary(self):\n chunk = (yield from self._readline()).rstrip()\n if chunk == self._boundary:\n pass\n elif chunk == self._boundary + b'--':\n self._at_eof = True\n else:\n raise ValueError('Invalid boundary %r, expected %r'\n % (chunk, self._boundary))\n\n @asyncio.coroutine\n def _read_headers(self):\n lines = [b'']\n while True:\n chunk = yield from self._content.readline()\n chunk = chunk.strip()\n lines.append(chunk)\n if not chunk:\n break\n parser = HttpParser()\n headers, *_ = parser.parse_headers(lines)\n return headers\n\n @asyncio.coroutine\n def _maybe_release_last_part(self):\n \"\"\"Ensures that the last read body part is read completely.\"\"\"\n if self._last_part is not None:\n if not self._last_part.at_eof():\n yield from self._last_part.release()\n self._unread.extend(self._last_part._unread)\n self._last_part = None\n\n\nclass BodyPartWriter(object):\n \"\"\"Multipart writer for single body part.\"\"\"\n\n def __init__(self, obj, headers=None, *, chunk_size=8192):\n if headers is None:\n headers = CIMultiDict()\n elif not isinstance(headers, CIMultiDict):\n headers = CIMultiDict(headers)\n\n self.obj = obj\n self.headers = headers\n self._chunk_size = chunk_size\n self._fill_headers_with_defaults()\n\n self._serialize_map = {\n bytes: self._serialize_bytes,\n str: self._serialize_str,\n io.IOBase: self._serialize_io,\n MultipartWriter: self._serialize_multipart,\n ('application', 'json'): self._serialize_json,\n ('application', 'x-www-form-urlencoded'): self._serialize_form\n }\n\n def _fill_headers_with_defaults(self):\n if CONTENT_TYPE not in self.headers:\n content_type = self._guess_content_type(self.obj)\n if content_type is not None:\n self.headers[CONTENT_TYPE] = content_type\n\n if CONTENT_LENGTH not in self.headers:\n content_length = self._guess_content_length(self.obj)\n if content_length is not None:\n self.headers[CONTENT_LENGTH] = str(content_length)\n\n if CONTENT_DISPOSITION not in self.headers:\n filename = self._guess_filename(self.obj)\n if filename is not None:\n self.set_content_disposition('attachment', filename=filename)\n\n def _guess_content_length(self, obj):\n if isinstance(obj, bytes):\n return len(obj)\n elif isinstance(obj, str):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n charset = params.get('charset', 'us-ascii')\n return len(obj.encode(charset))\n elif isinstance(obj, io.StringIO):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n charset = params.get('charset', 'us-ascii')\n return len(obj.getvalue().encode(charset)) - obj.tell()\n elif isinstance(obj, io.BytesIO):\n return len(obj.getvalue()) - obj.tell()\n elif isinstance(obj, io.IOBase):\n try:\n return os.fstat(obj.fileno()).st_size - obj.tell()\n except (AttributeError, OSError):\n return None\n else:\n return None\n\n def _guess_content_type(self, obj, default='application/octet-stream'):\n if hasattr(obj, 'name'):\n name = getattr(obj, 'name')\n return mimetypes.guess_type(name)[0]\n elif isinstance(obj, (str, io.StringIO)):\n return 'text/plain; charset=utf-8'\n else:\n return default\n\n def _guess_filename(self, obj):\n if isinstance(obj, 
io.IOBase):\n name = getattr(obj, 'name', None)\n if name is not None:\n return Path(name).name\n\n def serialize(self):\n \"\"\"Yields byte chunks for body part.\"\"\"\n\n has_encoding = (\n CONTENT_ENCODING in self.headers and\n self.headers[CONTENT_ENCODING] != 'identity' or\n CONTENT_TRANSFER_ENCODING in self.headers\n )\n if has_encoding:\n # since we're following streaming approach which doesn't assumes\n # any intermediate buffers, we cannot calculate real content length\n # with the specified content encoding scheme. So, instead of lying\n # about content length and cause reading issues, we have to strip\n # this information.\n self.headers.pop(CONTENT_LENGTH, None)\n\n if self.headers:\n yield b'\\r\\n'.join(\n b': '.join(map(lambda i: i.encode('latin1'), item))\n for item in self.headers.items()\n )\n yield b'\\r\\n\\r\\n'\n yield from self._maybe_encode_stream(self._serialize_obj())\n yield b'\\r\\n'\n\n def _serialize_obj(self):\n obj = self.obj\n mtype, stype, *_ = parse_mimetype(self.headers.get(CONTENT_TYPE))\n serializer = self._serialize_map.get((mtype, stype))\n if serializer is not None:\n return serializer(obj)\n\n for key in self._serialize_map:\n if not isinstance(key, tuple) and isinstance(obj, key):\n return self._serialize_map[key](obj)\n return self._serialize_default(obj)\n\n def _serialize_bytes(self, obj):\n yield obj\n\n def _serialize_str(self, obj):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n yield obj.encode(params.get('charset', 'us-ascii'))\n\n def _serialize_io(self, obj):\n while True:\n chunk = obj.read(self._chunk_size)\n if not chunk:\n break\n if isinstance(chunk, str):\n yield from self._serialize_str(chunk)\n else:\n yield from self._serialize_bytes(chunk)\n\n def _serialize_multipart(self, obj):\n yield from obj.serialize()\n\n def _serialize_json(self, obj):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n yield json.dumps(obj).encode(params.get('charset', 'utf-8'))\n\n def _serialize_form(self, obj):\n if isinstance(obj, Mapping):\n obj = list(obj.items())\n return self._serialize_str(urlencode(obj, doseq=True))\n\n def _serialize_default(self, obj):\n raise TypeError('unknown body part type %r' % type(obj))\n\n def _maybe_encode_stream(self, stream):\n if CONTENT_ENCODING in self.headers:\n stream = self._apply_content_encoding(stream)\n if CONTENT_TRANSFER_ENCODING in self.headers:\n stream = self._apply_content_transfer_encoding(stream)\n yield from stream\n\n def _apply_content_encoding(self, stream):\n encoding = self.headers[CONTENT_ENCODING].lower()\n if encoding == 'identity':\n yield from stream\n elif encoding in ('deflate', 'gzip'):\n if encoding == 'gzip':\n zlib_mode = 16 + zlib.MAX_WBITS\n else:\n zlib_mode = -zlib.MAX_WBITS\n zcomp = zlib.compressobj(wbits=zlib_mode)\n for chunk in stream:\n yield zcomp.compress(chunk)\n else:\n yield zcomp.flush()\n else:\n raise RuntimeError('unknown content encoding: {}'\n ''.format(encoding))\n\n def _apply_content_transfer_encoding(self, stream):\n encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()\n if encoding == 'base64':\n buffer = bytearray()\n while True:\n if buffer:\n div, mod = divmod(len(buffer), 3)\n chunk, buffer = buffer[:div * 3], buffer[div * 3:]\n if chunk:\n yield base64.b64encode(chunk)\n chunk = next(stream, None)\n if not chunk:\n if buffer:\n yield base64.b64encode(buffer[:])\n return\n buffer.extend(chunk)\n elif encoding == 'quoted-printable':\n for chunk in stream:\n yield binascii.b2a_qp(chunk)\n elif encoding == 'binary':\n 
yield from stream\n else:\n raise RuntimeError('unknown content transfer encoding: {}'\n ''.format(encoding))\n\n def set_content_disposition(self, disptype, **params):\n \"\"\"Sets ``Content-Disposition`` header.\n\n :param str disptype: Disposition type: inline, attachment, form-data.\n Should be valid extension token (see RFC 2183)\n :param dict params: Disposition params\n \"\"\"\n if not disptype or not (TOKEN > set(disptype)):\n raise ValueError('bad content disposition type {!r}'\n ''.format(disptype))\n value = disptype\n if params:\n lparams = []\n for key, val in params.items():\n if not key or not (TOKEN > set(key)):\n raise ValueError('bad content disposition parameter'\n ' {!r}={!r}'.format(key, val))\n qval = quote(val, '')\n lparams.append((key, '\"%s\"' % qval))\n if key == 'filename':\n lparams.append(('filename*', \"utf-8''\" + qval))\n sparams = '; '.join('='.join(pair) for pair in lparams)\n value = '; '.join((value, sparams))\n self.headers[CONTENT_DISPOSITION] = value\n\n @property\n def filename(self):\n \"\"\"Returns filename specified in Content-Disposition header or ``None``\n if missed.\"\"\"\n _, params = parse_content_disposition(\n self.headers.get(CONTENT_DISPOSITION))\n return content_disposition_filename(params)\n\n\nclass MultipartWriter(object):\n \"\"\"Multipart body writer.\"\"\"\n\n #: Body part reader class for non multipart/* content types.\n part_writer_cls = BodyPartWriter\n\n def __init__(self, subtype='mixed', boundary=None):\n boundary = boundary if boundary is not None else uuid.uuid4().hex\n try:\n boundary.encode('us-ascii')\n except UnicodeEncodeError:\n raise ValueError('boundary should contains ASCII only chars')\n self.headers = CIMultiDict()\n self.headers[CONTENT_TYPE] = 'multipart/{}; boundary=\"{}\"'.format(\n subtype, boundary\n )\n self.parts = []\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n def __iter__(self):\n return iter(self.parts)\n\n def __len__(self):\n return len(self.parts)\n\n @property\n def boundary(self):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n return params['boundary'].encode('us-ascii')\n\n def append(self, obj, headers=None):\n \"\"\"Adds a new body part to multipart writer.\"\"\"\n if isinstance(obj, self.part_writer_cls):\n if headers:\n obj.headers.update(headers)\n self.parts.append(obj)\n else:\n if not headers:\n headers = CIMultiDict()\n self.parts.append(self.part_writer_cls(obj, headers))\n return self.parts[-1]\n\n def append_json(self, obj, headers=None):\n \"\"\"Helper to append JSON part.\"\"\"\n if not headers:\n headers = CIMultiDict()\n headers[CONTENT_TYPE] = 'application/json'\n return self.append(obj, headers)\n\n def append_form(self, obj, headers=None):\n \"\"\"Helper to append form urlencoded part.\"\"\"\n if not headers:\n headers = CIMultiDict()\n headers[CONTENT_TYPE] = 'application/x-www-form-urlencoded'\n assert isinstance(obj, (Sequence, Mapping))\n return self.append(obj, headers)\n\n def serialize(self):\n \"\"\"Yields multipart byte chunks.\"\"\"\n if not self.parts:\n yield b''\n return\n\n for part in self.parts:\n yield b'--' + self.boundary + b'\\r\\n'\n yield from part.serialize()\n else:\n yield b'--' + self.boundary + b'--\\r\\n'\n\n yield b''\n", "path": "aiohttp/multipart.py" } ]
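One visible difference between this snapshot of `aiohttp/multipart.py` and the one that follows is in `_read_chunk_from_stream`: the version above additionally buffers a trailing chunk shorter than `b'\r\n' + boundary` (while the stream is not yet at EOF) back into `self._prev_chunk` and returns `b''`, so a boundary marker split across two reads is not mistaken for the end of a part; the version below lacks that branch. Both rely on the same sliding-window search, which resumes just before the seam of the two reads. A self-contained illustration of that search, with made-up payloads:

```
def find_boundary(prev, chunk, boundary):
    # Mirror of the window search in _read_chunk_from_stream: the marker
    # may straddle two reads, so search prev + chunk, resuming just
    # before the seam.
    window = prev + chunk
    sub = b"\r\n" + boundary
    return window.find(sub, max(0, len(prev) - len(sub)))


prev, chunk = b"part data...\r\n--bou", b"ndary123\r\nnext part"
idx = find_boundary(prev, chunk, b"--boundary123")
assert idx == 12  # found even though the marker straddles the two reads
```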
[ { "content": "import asyncio\nimport base64\nimport binascii\nimport io\nimport json\nimport mimetypes\nimport os\nimport re\nimport sys\nimport uuid\nimport warnings\nimport zlib\nfrom collections import Mapping, Sequence, deque\nfrom pathlib import Path\nfrom urllib.parse import parse_qsl, quote, unquote, urlencode\n\nfrom multidict import CIMultiDict\n\nfrom .hdrs import (CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LENGTH,\n CONTENT_TRANSFER_ENCODING, CONTENT_TYPE)\nfrom .helpers import parse_mimetype\nfrom .protocol import HttpParser\n\n__all__ = ('MultipartReader', 'MultipartWriter',\n 'BodyPartReader', 'BodyPartWriter',\n 'BadContentDispositionHeader', 'BadContentDispositionParam',\n 'parse_content_disposition', 'content_disposition_filename')\n\n\nCHAR = set(chr(i) for i in range(0, 128))\nCTL = set(chr(i) for i in range(0, 32)) | {chr(127), }\nSEPARATORS = {'(', ')', '<', '>', '@', ',', ';', ':', '\\\\', '\"', '/', '[', ']',\n '?', '=', '{', '}', ' ', chr(9)}\nTOKEN = CHAR ^ CTL ^ SEPARATORS\n\nPY_35 = sys.version_info >= (3, 5)\nPY_352 = sys.version_info >= (3, 5, 2)\n\n\nclass BadContentDispositionHeader(RuntimeWarning):\n pass\n\n\nclass BadContentDispositionParam(RuntimeWarning):\n pass\n\n\ndef parse_content_disposition(header):\n def is_token(string):\n return string and TOKEN >= set(string)\n\n def is_quoted(string):\n return string[0] == string[-1] == '\"'\n\n def is_rfc5987(string):\n return is_token(string) and string.count(\"'\") == 2\n\n def is_extended_param(string):\n return string.endswith('*')\n\n def is_continuous_param(string):\n pos = string.find('*') + 1\n if not pos:\n return False\n substring = string[pos:-1] if string.endswith('*') else string[pos:]\n return substring.isdigit()\n\n def unescape(text, *, chars=''.join(map(re.escape, CHAR))):\n return re.sub('\\\\\\\\([{}])'.format(chars), '\\\\1', text)\n\n if not header:\n return None, {}\n\n disptype, *parts = header.split(';')\n if not is_token(disptype):\n warnings.warn(BadContentDispositionHeader(header))\n return None, {}\n\n params = {}\n for item in parts:\n if '=' not in item:\n warnings.warn(BadContentDispositionHeader(header))\n return None, {}\n\n key, value = item.split('=', 1)\n key = key.lower().strip()\n value = value.lstrip()\n\n if key in params:\n warnings.warn(BadContentDispositionHeader(header))\n return None, {}\n\n if not is_token(key):\n warnings.warn(BadContentDispositionParam(item))\n continue\n\n elif is_continuous_param(key):\n if is_quoted(value):\n value = unescape(value[1:-1])\n elif not is_token(value):\n warnings.warn(BadContentDispositionParam(item))\n continue\n\n elif is_extended_param(key):\n if is_rfc5987(value):\n encoding, _, value = value.split(\"'\", 2)\n encoding = encoding or 'utf-8'\n else:\n warnings.warn(BadContentDispositionParam(item))\n continue\n\n try:\n value = unquote(value, encoding, 'strict')\n except UnicodeDecodeError: # pragma: nocover\n warnings.warn(BadContentDispositionParam(item))\n continue\n\n else:\n if is_quoted(value):\n value = unescape(value[1:-1].lstrip('\\\\/'))\n elif not is_token(value):\n warnings.warn(BadContentDispositionHeader(header))\n return None, {}\n\n params[key] = value\n\n return disptype.lower(), params\n\n\ndef content_disposition_filename(params):\n if not params:\n return None\n elif 'filename*' in params:\n return params['filename*']\n elif 'filename' in params:\n return params['filename']\n else:\n parts = []\n fnparams = sorted((key, value)\n for key, value in params.items()\n if key.startswith('filename*'))\n for 
num, (key, value) in enumerate(fnparams):\n _, tail = key.split('*', 1)\n if tail.endswith('*'):\n tail = tail[:-1]\n if tail == str(num):\n parts.append(value)\n else:\n break\n if not parts:\n return None\n value = ''.join(parts)\n if \"'\" in value:\n encoding, _, value = value.split(\"'\", 2)\n encoding = encoding or 'utf-8'\n return unquote(value, encoding, 'strict')\n return value\n\n\nclass MultipartResponseWrapper(object):\n \"\"\"Wrapper around the :class:`MultipartBodyReader` to take care about\n underlying connection and close it when it needs in.\"\"\"\n\n def __init__(self, resp, stream):\n self.resp = resp\n self.stream = stream\n\n if PY_35:\n def __aiter__(self):\n return self\n\n if not PY_352: # pragma: no cover\n __aiter__ = asyncio.coroutine(__aiter__)\n\n @asyncio.coroutine\n def __anext__(self):\n part = yield from self.next()\n if part is None:\n raise StopAsyncIteration # NOQA\n return part\n\n def at_eof(self):\n \"\"\"Returns ``True`` when all response data had been read.\n\n :rtype: bool\n \"\"\"\n return self.resp.content.at_eof()\n\n @asyncio.coroutine\n def next(self):\n \"\"\"Emits next multipart reader object.\"\"\"\n item = yield from self.stream.next()\n if self.stream.at_eof():\n yield from self.release()\n return item\n\n @asyncio.coroutine\n def release(self):\n \"\"\"Releases the connection gracefully, reading all the content\n to the void.\"\"\"\n yield from self.resp.release()\n\n\nclass BodyPartReader(object):\n \"\"\"Multipart reader for single body part.\"\"\"\n\n chunk_size = 8192\n\n def __init__(self, boundary, headers, content):\n self.headers = headers\n self._boundary = boundary\n self._content = content\n self._at_eof = False\n length = self.headers.get(CONTENT_LENGTH, None)\n self._length = int(length) if length is not None else None\n self._read_bytes = 0\n self._unread = deque()\n self._prev_chunk = None\n self._content_eof = 0\n\n if PY_35:\n def __aiter__(self):\n return self\n\n if not PY_352: # pragma: no cover\n __aiter__ = asyncio.coroutine(__aiter__)\n\n @asyncio.coroutine\n def __anext__(self):\n part = yield from self.next()\n if part is None:\n raise StopAsyncIteration # NOQA\n return part\n\n @asyncio.coroutine\n def next(self):\n item = yield from self.read()\n if not item:\n return None\n return item\n\n @asyncio.coroutine\n def read(self, *, decode=False):\n \"\"\"Reads body part data.\n\n :param bool decode: Decodes data following by encoding\n method from `Content-Encoding` header. 
If it missed\n data remains untouched\n\n :rtype: bytearray\n \"\"\"\n if self._at_eof:\n return b''\n data = bytearray()\n if self._length is None:\n while not self._at_eof:\n data.extend((yield from self.readline()))\n else:\n while not self._at_eof:\n data.extend((yield from self.read_chunk(self.chunk_size)))\n if decode:\n return self.decode(data)\n return data\n\n @asyncio.coroutine\n def read_chunk(self, size=chunk_size):\n \"\"\"Reads body part content chunk of the specified size.\n\n :param int size: chunk size\n\n :rtype: bytearray\n \"\"\"\n if self._at_eof:\n return b''\n if self._length:\n chunk = yield from self._read_chunk_from_length(size)\n else:\n chunk = yield from self._read_chunk_from_stream(size)\n\n self._read_bytes += len(chunk)\n if self._read_bytes == self._length:\n self._at_eof = True\n if self._at_eof:\n assert b'\\r\\n' == (yield from self._content.readline()), \\\n 'reader did not read all the data or it is malformed'\n return chunk\n\n @asyncio.coroutine\n def _read_chunk_from_length(self, size):\n \"\"\"Reads body part content chunk of the specified size.\n The body part must has `Content-Length` header with proper value.\n\n :param int size: chunk size\n\n :rtype: bytearray\n \"\"\"\n assert self._length is not None, \\\n 'Content-Length required for chunked read'\n chunk_size = min(size, self._length - self._read_bytes)\n chunk = yield from self._content.read(chunk_size)\n return chunk\n\n @asyncio.coroutine\n def _read_chunk_from_stream(self, size):\n \"\"\"Reads content chunk of body part with unknown length.\n The `Content-Length` header for body part is not necessary.\n\n :param int size: chunk size\n\n :rtype: bytearray\n \"\"\"\n assert size >= len(self._boundary) + 2, \\\n 'Chunk size must be greater or equal than boundary length + 2'\n first_chunk = self._prev_chunk is None\n if first_chunk:\n self._prev_chunk = yield from self._content.read(size)\n\n chunk = yield from self._content.read(size)\n self._content_eof += int(self._content.at_eof())\n assert self._content_eof < 3, \"Reading after EOF\"\n window = self._prev_chunk + chunk\n sub = b'\\r\\n' + self._boundary\n if first_chunk:\n idx = window.find(sub)\n else:\n idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))\n if idx >= 0:\n # pushing boundary back to content\n self._content.unread_data(window[idx:])\n if size > idx:\n self._prev_chunk = self._prev_chunk[:idx]\n chunk = window[len(self._prev_chunk):idx]\n if not chunk:\n self._at_eof = True\n result = self._prev_chunk\n self._prev_chunk = chunk\n return result\n\n @asyncio.coroutine\n def readline(self):\n \"\"\"Reads body part by line by line.\n\n :rtype: bytearray\n \"\"\"\n if self._at_eof:\n return b''\n\n if self._unread:\n line = self._unread.popleft()\n else:\n line = yield from self._content.readline()\n\n if line.startswith(self._boundary):\n # the very last boundary may not come with \\r\\n,\n # so set single rules for everyone\n sline = line.rstrip(b'\\r\\n')\n boundary = self._boundary\n last_boundary = self._boundary + b'--'\n # ensure that we read exactly the boundary, not something alike\n if sline == boundary or sline == last_boundary:\n self._at_eof = True\n self._unread.append(line)\n return b''\n else:\n next_line = yield from self._content.readline()\n if next_line.startswith(self._boundary):\n line = line[:-2] # strip CRLF but only once\n self._unread.append(next_line)\n\n return line\n\n @asyncio.coroutine\n def release(self):\n \"\"\"Like :meth:`read`, but reads all the data to the void.\n\n :rtype: 
None\n \"\"\"\n if self._at_eof:\n return\n if self._length is None:\n while not self._at_eof:\n yield from self.readline()\n else:\n while not self._at_eof:\n yield from self.read_chunk(self.chunk_size)\n\n @asyncio.coroutine\n def text(self, *, encoding=None):\n \"\"\"Like :meth:`read`, but assumes that body part contains text data.\n\n :param str encoding: Custom text encoding. Overrides specified\n in charset param of `Content-Type` header\n\n :rtype: str\n \"\"\"\n data = yield from self.read(decode=True)\n encoding = encoding or self.get_charset(default='latin1')\n return data.decode(encoding)\n\n @asyncio.coroutine\n def json(self, *, encoding=None):\n \"\"\"Like :meth:`read`, but assumes that body parts contains JSON data.\n\n :param str encoding: Custom JSON encoding. Overrides specified\n in charset param of `Content-Type` header\n \"\"\"\n data = yield from self.read(decode=True)\n if not data:\n return None\n encoding = encoding or self.get_charset(default='utf-8')\n return json.loads(data.decode(encoding))\n\n @asyncio.coroutine\n def form(self, *, encoding=None):\n \"\"\"Like :meth:`read`, but assumes that body parts contains form\n urlencoded data.\n\n :param str encoding: Custom form encoding. Overrides specified\n in charset param of `Content-Type` header\n \"\"\"\n data = yield from self.read(decode=True)\n if not data:\n return None\n encoding = encoding or self.get_charset(default='utf-8')\n return parse_qsl(data.rstrip().decode(encoding), encoding=encoding)\n\n def at_eof(self):\n \"\"\"Returns ``True`` if the boundary was reached or\n ``False`` otherwise.\n\n :rtype: bool\n \"\"\"\n return self._at_eof\n\n def decode(self, data):\n \"\"\"Decodes data according the specified `Content-Encoding`\n or `Content-Transfer-Encoding` headers value.\n\n Supports ``gzip``, ``deflate`` and ``identity`` encodings for\n `Content-Encoding` header.\n\n Supports ``base64``, ``quoted-printable``, ``binary`` encodings for\n `Content-Transfer-Encoding` header.\n\n :param bytearray data: Data to decode.\n\n :raises: :exc:`RuntimeError` - if encoding is unknown.\n\n :rtype: bytes\n \"\"\"\n if CONTENT_TRANSFER_ENCODING in self.headers:\n data = self._decode_content_transfer(data)\n if CONTENT_ENCODING in self.headers:\n return self._decode_content(data)\n return data\n\n def _decode_content(self, data):\n encoding = self.headers[CONTENT_ENCODING].lower()\n\n if encoding == 'deflate':\n return zlib.decompress(data, -zlib.MAX_WBITS)\n elif encoding == 'gzip':\n return zlib.decompress(data, 16 + zlib.MAX_WBITS)\n elif encoding == 'identity':\n return data\n else:\n raise RuntimeError('unknown content encoding: {}'.format(encoding))\n\n def _decode_content_transfer(self, data):\n encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()\n\n if encoding == 'base64':\n return base64.b64decode(data)\n elif encoding == 'quoted-printable':\n return binascii.a2b_qp(data)\n elif encoding == 'binary':\n return data\n else:\n raise RuntimeError('unknown content transfer encoding: {}'\n ''.format(encoding))\n\n def get_charset(self, default=None):\n \"\"\"Returns charset parameter from ``Content-Type`` header or default.\n \"\"\"\n ctype = self.headers.get(CONTENT_TYPE, '')\n *_, params = parse_mimetype(ctype)\n return params.get('charset', default)\n\n @property\n def filename(self):\n \"\"\"Returns filename specified in Content-Disposition header or ``None``\n if missed or header is malformed.\"\"\"\n _, params = parse_content_disposition(\n self.headers.get(CONTENT_DISPOSITION))\n return 
content_disposition_filename(params)\n\n\nclass MultipartReader(object):\n \"\"\"Multipart body reader.\"\"\"\n\n #: Response wrapper, used when multipart readers constructs from response.\n response_wrapper_cls = MultipartResponseWrapper\n #: Multipart reader class, used to handle multipart/* body parts.\n #: None points to type(self)\n multipart_reader_cls = None\n #: Body part reader class for non multipart/* content types.\n part_reader_cls = BodyPartReader\n\n def __init__(self, headers, content):\n self.headers = headers\n self._boundary = ('--' + self._get_boundary()).encode()\n self._content = content\n self._last_part = None\n self._at_eof = False\n self._at_bof = True\n self._unread = []\n\n if PY_35:\n def __aiter__(self):\n return self\n\n if not PY_352: # pragma: no cover\n __aiter__ = asyncio.coroutine(__aiter__)\n\n @asyncio.coroutine\n def __anext__(self):\n part = yield from self.next()\n if part is None:\n raise StopAsyncIteration # NOQA\n return part\n\n @classmethod\n def from_response(cls, response):\n \"\"\"Constructs reader instance from HTTP response.\n\n :param response: :class:`~aiohttp.client.ClientResponse` instance\n \"\"\"\n obj = cls.response_wrapper_cls(response, cls(response.headers,\n response.content))\n return obj\n\n def at_eof(self):\n \"\"\"Returns ``True`` if the final boundary was reached or\n ``False`` otherwise.\n\n :rtype: bool\n \"\"\"\n return self._at_eof\n\n @asyncio.coroutine\n def next(self):\n \"\"\"Emits the next multipart body part.\"\"\"\n # So, if we're at BOF, we need to skip till the boundary.\n if self._at_eof:\n return\n yield from self._maybe_release_last_part()\n if self._at_bof:\n yield from self._read_until_first_boundary()\n self._at_bof = False\n else:\n yield from self._read_boundary()\n if self._at_eof: # we just read the last boundary, nothing to do there\n return\n self._last_part = yield from self.fetch_next_part()\n return self._last_part\n\n @asyncio.coroutine\n def release(self):\n \"\"\"Reads all the body parts to the void till the final boundary.\"\"\"\n while not self._at_eof:\n item = yield from self.next()\n if item is None:\n break\n yield from item.release()\n\n @asyncio.coroutine\n def fetch_next_part(self):\n \"\"\"Returns the next body part reader.\"\"\"\n headers = yield from self._read_headers()\n return self._get_part_reader(headers)\n\n def _get_part_reader(self, headers):\n \"\"\"Dispatches the response by the `Content-Type` header, returning\n suitable reader instance.\n\n :param dict headers: Response headers\n \"\"\"\n ctype = headers.get(CONTENT_TYPE, '')\n mtype, *_ = parse_mimetype(ctype)\n if mtype == 'multipart':\n if self.multipart_reader_cls is None:\n return type(self)(headers, self._content)\n return self.multipart_reader_cls(headers, self._content)\n else:\n return self.part_reader_cls(self._boundary, headers, self._content)\n\n def _get_boundary(self):\n mtype, *_, params = parse_mimetype(self.headers[CONTENT_TYPE])\n\n assert mtype == 'multipart', 'multipart/* content type expected'\n\n if 'boundary' not in params:\n raise ValueError('boundary missed for Content-Type: %s'\n % self.headers[CONTENT_TYPE])\n\n boundary = params['boundary']\n if len(boundary) > 70:\n raise ValueError('boundary %r is too long (70 chars max)'\n % boundary)\n\n return boundary\n\n @asyncio.coroutine\n def _readline(self):\n if self._unread:\n return self._unread.pop()\n return (yield from self._content.readline())\n\n @asyncio.coroutine\n def _read_until_first_boundary(self):\n while True:\n chunk = yield from 
self._readline()\n if chunk == b'':\n raise ValueError(\"Could not find starting boundary %r\"\n % (self._boundary))\n chunk = chunk.rstrip()\n if chunk == self._boundary:\n return\n elif chunk == self._boundary + b'--':\n self._at_eof = True\n return\n\n @asyncio.coroutine\n def _read_boundary(self):\n chunk = (yield from self._readline()).rstrip()\n if chunk == self._boundary:\n pass\n elif chunk == self._boundary + b'--':\n self._at_eof = True\n else:\n raise ValueError('Invalid boundary %r, expected %r'\n % (chunk, self._boundary))\n\n @asyncio.coroutine\n def _read_headers(self):\n lines = [b'']\n while True:\n chunk = yield from self._content.readline()\n chunk = chunk.strip()\n lines.append(chunk)\n if not chunk:\n break\n parser = HttpParser()\n headers, *_ = parser.parse_headers(lines)\n return headers\n\n @asyncio.coroutine\n def _maybe_release_last_part(self):\n \"\"\"Ensures that the last read body part is read completely.\"\"\"\n if self._last_part is not None:\n if not self._last_part.at_eof():\n yield from self._last_part.release()\n self._unread.extend(self._last_part._unread)\n self._last_part = None\n\n\nclass BodyPartWriter(object):\n \"\"\"Multipart writer for single body part.\"\"\"\n\n def __init__(self, obj, headers=None, *, chunk_size=8192):\n if headers is None:\n headers = CIMultiDict()\n elif not isinstance(headers, CIMultiDict):\n headers = CIMultiDict(headers)\n\n self.obj = obj\n self.headers = headers\n self._chunk_size = chunk_size\n self._fill_headers_with_defaults()\n\n self._serialize_map = {\n bytes: self._serialize_bytes,\n str: self._serialize_str,\n io.IOBase: self._serialize_io,\n MultipartWriter: self._serialize_multipart,\n ('application', 'json'): self._serialize_json,\n ('application', 'x-www-form-urlencoded'): self._serialize_form\n }\n\n def _fill_headers_with_defaults(self):\n if CONTENT_TYPE not in self.headers:\n content_type = self._guess_content_type(self.obj)\n if content_type is not None:\n self.headers[CONTENT_TYPE] = content_type\n\n if CONTENT_LENGTH not in self.headers:\n content_length = self._guess_content_length(self.obj)\n if content_length is not None:\n self.headers[CONTENT_LENGTH] = str(content_length)\n\n if CONTENT_DISPOSITION not in self.headers:\n filename = self._guess_filename(self.obj)\n if filename is not None:\n self.set_content_disposition('attachment', filename=filename)\n\n def _guess_content_length(self, obj):\n if isinstance(obj, bytes):\n return len(obj)\n elif isinstance(obj, str):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n charset = params.get('charset', 'us-ascii')\n return len(obj.encode(charset))\n elif isinstance(obj, io.StringIO):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n charset = params.get('charset', 'us-ascii')\n return len(obj.getvalue().encode(charset)) - obj.tell()\n elif isinstance(obj, io.BytesIO):\n return len(obj.getvalue()) - obj.tell()\n elif isinstance(obj, io.IOBase):\n try:\n return os.fstat(obj.fileno()).st_size - obj.tell()\n except (AttributeError, OSError):\n return None\n else:\n return None\n\n def _guess_content_type(self, obj, default='application/octet-stream'):\n if hasattr(obj, 'name'):\n name = getattr(obj, 'name')\n return mimetypes.guess_type(name)[0]\n elif isinstance(obj, (str, io.StringIO)):\n return 'text/plain; charset=utf-8'\n else:\n return default\n\n def _guess_filename(self, obj):\n if isinstance(obj, io.IOBase):\n name = getattr(obj, 'name', None)\n if name is not None:\n return Path(name).name\n\n def serialize(self):\n 
\"\"\"Yields byte chunks for body part.\"\"\"\n\n has_encoding = (\n CONTENT_ENCODING in self.headers and\n self.headers[CONTENT_ENCODING] != 'identity' or\n CONTENT_TRANSFER_ENCODING in self.headers\n )\n if has_encoding:\n # since we're following streaming approach which doesn't assumes\n # any intermediate buffers, we cannot calculate real content length\n # with the specified content encoding scheme. So, instead of lying\n # about content length and cause reading issues, we have to strip\n # this information.\n self.headers.pop(CONTENT_LENGTH, None)\n\n if self.headers:\n yield b'\\r\\n'.join(\n b': '.join(map(lambda i: i.encode('latin1'), item))\n for item in self.headers.items()\n )\n yield b'\\r\\n\\r\\n'\n yield from self._maybe_encode_stream(self._serialize_obj())\n yield b'\\r\\n'\n\n def _serialize_obj(self):\n obj = self.obj\n mtype, stype, *_ = parse_mimetype(self.headers.get(CONTENT_TYPE))\n serializer = self._serialize_map.get((mtype, stype))\n if serializer is not None:\n return serializer(obj)\n\n for key in self._serialize_map:\n if not isinstance(key, tuple) and isinstance(obj, key):\n return self._serialize_map[key](obj)\n return self._serialize_default(obj)\n\n def _serialize_bytes(self, obj):\n yield obj\n\n def _serialize_str(self, obj):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n yield obj.encode(params.get('charset', 'us-ascii'))\n\n def _serialize_io(self, obj):\n while True:\n chunk = obj.read(self._chunk_size)\n if not chunk:\n break\n if isinstance(chunk, str):\n yield from self._serialize_str(chunk)\n else:\n yield from self._serialize_bytes(chunk)\n\n def _serialize_multipart(self, obj):\n yield from obj.serialize()\n\n def _serialize_json(self, obj):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n yield json.dumps(obj).encode(params.get('charset', 'utf-8'))\n\n def _serialize_form(self, obj):\n if isinstance(obj, Mapping):\n obj = list(obj.items())\n return self._serialize_str(urlencode(obj, doseq=True))\n\n def _serialize_default(self, obj):\n raise TypeError('unknown body part type %r' % type(obj))\n\n def _maybe_encode_stream(self, stream):\n if CONTENT_ENCODING in self.headers:\n stream = self._apply_content_encoding(stream)\n if CONTENT_TRANSFER_ENCODING in self.headers:\n stream = self._apply_content_transfer_encoding(stream)\n yield from stream\n\n def _apply_content_encoding(self, stream):\n encoding = self.headers[CONTENT_ENCODING].lower()\n if encoding == 'identity':\n yield from stream\n elif encoding in ('deflate', 'gzip'):\n if encoding == 'gzip':\n zlib_mode = 16 + zlib.MAX_WBITS\n else:\n zlib_mode = -zlib.MAX_WBITS\n zcomp = zlib.compressobj(wbits=zlib_mode)\n for chunk in stream:\n yield zcomp.compress(chunk)\n else:\n yield zcomp.flush()\n else:\n raise RuntimeError('unknown content encoding: {}'\n ''.format(encoding))\n\n def _apply_content_transfer_encoding(self, stream):\n encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()\n if encoding == 'base64':\n buffer = bytearray()\n while True:\n if buffer:\n div, mod = divmod(len(buffer), 3)\n chunk, buffer = buffer[:div * 3], buffer[div * 3:]\n if chunk:\n yield base64.b64encode(chunk)\n chunk = next(stream, None)\n if not chunk:\n if buffer:\n yield base64.b64encode(buffer[:])\n return\n buffer.extend(chunk)\n elif encoding == 'quoted-printable':\n for chunk in stream:\n yield binascii.b2a_qp(chunk)\n elif encoding == 'binary':\n yield from stream\n else:\n raise RuntimeError('unknown content transfer encoding: {}'\n ''.format(encoding))\n\n def 
set_content_disposition(self, disptype, **params):\n \"\"\"Sets ``Content-Disposition`` header.\n\n :param str disptype: Disposition type: inline, attachment, form-data.\n Should be valid extension token (see RFC 2183)\n :param dict params: Disposition params\n \"\"\"\n if not disptype or not (TOKEN > set(disptype)):\n raise ValueError('bad content disposition type {!r}'\n ''.format(disptype))\n value = disptype\n if params:\n lparams = []\n for key, val in params.items():\n if not key or not (TOKEN > set(key)):\n raise ValueError('bad content disposition parameter'\n ' {!r}={!r}'.format(key, val))\n qval = quote(val, '')\n lparams.append((key, '\"%s\"' % qval))\n if key == 'filename':\n lparams.append(('filename*', \"utf-8''\" + qval))\n sparams = '; '.join('='.join(pair) for pair in lparams)\n value = '; '.join((value, sparams))\n self.headers[CONTENT_DISPOSITION] = value\n\n @property\n def filename(self):\n \"\"\"Returns filename specified in Content-Disposition header or ``None``\n if missed.\"\"\"\n _, params = parse_content_disposition(\n self.headers.get(CONTENT_DISPOSITION))\n return content_disposition_filename(params)\n\n\nclass MultipartWriter(object):\n \"\"\"Multipart body writer.\"\"\"\n\n #: Body part reader class for non multipart/* content types.\n part_writer_cls = BodyPartWriter\n\n def __init__(self, subtype='mixed', boundary=None):\n boundary = boundary if boundary is not None else uuid.uuid4().hex\n try:\n boundary.encode('us-ascii')\n except UnicodeEncodeError:\n raise ValueError('boundary should contains ASCII only chars')\n self.headers = CIMultiDict()\n self.headers[CONTENT_TYPE] = 'multipart/{}; boundary=\"{}\"'.format(\n subtype, boundary\n )\n self.parts = []\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n def __iter__(self):\n return iter(self.parts)\n\n def __len__(self):\n return len(self.parts)\n\n @property\n def boundary(self):\n *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))\n return params['boundary'].encode('us-ascii')\n\n def append(self, obj, headers=None):\n \"\"\"Adds a new body part to multipart writer.\"\"\"\n if isinstance(obj, self.part_writer_cls):\n if headers:\n obj.headers.update(headers)\n self.parts.append(obj)\n else:\n if not headers:\n headers = CIMultiDict()\n self.parts.append(self.part_writer_cls(obj, headers))\n return self.parts[-1]\n\n def append_json(self, obj, headers=None):\n \"\"\"Helper to append JSON part.\"\"\"\n if not headers:\n headers = CIMultiDict()\n headers[CONTENT_TYPE] = 'application/json'\n return self.append(obj, headers)\n\n def append_form(self, obj, headers=None):\n \"\"\"Helper to append form urlencoded part.\"\"\"\n if not headers:\n headers = CIMultiDict()\n headers[CONTENT_TYPE] = 'application/x-www-form-urlencoded'\n assert isinstance(obj, (Sequence, Mapping))\n return self.append(obj, headers)\n\n def serialize(self):\n \"\"\"Yields multipart byte chunks.\"\"\"\n if not self.parts:\n yield b''\n return\n\n for part in self.parts:\n yield b'--' + self.boundary + b'\\r\\n'\n yield from part.serialize()\n else:\n yield b'--' + self.boundary + b'--\\r\\n'\n\n yield b''\n", "path": "aiohttp/multipart.py" } ]
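For orientation, here is a minimal sketch of how the writer classes above fit together, assuming this module is importable as `aiohttp.multipart` (the aiohttp 1.x line shown here); the boundary string is an arbitrary illustrative value, not anything the patch requires.

```
import io

from aiohttp.multipart import MultipartWriter

# Build a multipart body out of heterogeneous parts; serialize() yields the
# raw byte chunks, boundaries included.
with MultipartWriter('mixed', boundary='example-boundary') as writer:
    writer.append('plain text field')             # text/plain part
    writer.append_json({'key': 'value'})          # application/json part
    writer.append(io.BytesIO(b'binary payload'))  # application/octet-stream part

body = b''.join(writer.serialize())
```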
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index a26b3e5ad9b..ea2d92b41ee 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -39,6 +39,7 @@ If you like to use *virtualenv* please run: $ cd aiohttp $ virtualenv --python=`which python3` venv + $ . venv/bin/activate For standard python *venv*: @@ -46,6 +47,7 @@ For standard python *venv*: $ cd aiohttp $ python3 -m venv venv + $ . venv/bin/activate For *virtualenvwrapper* (my choice): diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt index 0a5a9a7146a..c5006a2d9f7 100644 --- a/CONTRIBUTORS.txt +++ b/CONTRIBUTORS.txt @@ -108,6 +108,7 @@ Pankaj Pandey Pau Freixes Paul Colomiets Philipp A. +Rafael Viotti Raúl Cumplido "Required Field" <[email protected]> Robert Lu diff --git a/aiohttp/multipart.py b/aiohttp/multipart.py index 093c85603a7..9c2c7db80ed 100644 --- a/aiohttp/multipart.py +++ b/aiohttp/multipart.py @@ -332,10 +332,6 @@ def _read_chunk_from_stream(self, size): chunk = window[len(self._prev_chunk):idx] if not chunk: self._at_eof = True - if 0 < len(chunk) < len(sub) and not self._content_eof: - self._prev_chunk += chunk - self._at_eof = False - return b'' result = self._prev_chunk self._prev_chunk = chunk return result diff --git a/docs/web.rst b/docs/web.rst index 6f2cdb2bcf2..a5ba41b7f25 100644 --- a/docs/web.rst +++ b/docs/web.rst @@ -591,7 +591,7 @@ should use :meth:`Request.multipart` which returns :ref:`multipart reader filename = mp3.filename - # You cannot relay on Content-Length if transfer is chunked. + # You cannot rely on Content-Length if transfer is chunked. size = 0 with open(os.path.join('/spool/yarrr-media/mp3/', filename), 'wb') as f: while True:
Method "read_chunk" of "BodyPartReader" returns zero bytes before eof ## Long story short I've implemented a multipart file upload handler inspired on code from the [docs](http://aiohttp.readthedocs.io/en/stable/web.html#file-uploads). My code is truncating part's data. I believe the problem is in the method `_read_chunk_from_stream`, which is used by `read_chunk` of `BodyPartReader`. That method is returning a zero-length `bytearray` before the part's EOF. This is the pseudo-code. reader = await request.multipart() part = await reader.next() arr = bytearray() while True: chunk = await part.read_chunk() # 8192 bytes by default. if not chunk: break arr.extend(chunk) ## Expected behaviour The loop ends when all the part's data has been read. ## Actual behaviour The loop ends before the part's data is exhausted, i.e., chunk becomes a zero-length `bytearray` prematurely. ## Steps to reproduce The code is part of a large web application so it's hard for me to give reproducible steps. But replacing the break condition to `if not part._at_eof` made the problem go away. reader = await request.multipart() part = await reader.next() arr = bytearray() while True: chunk = await part.read_chunk() # 8192 bytes by default. if not part._at_eof: # This fixed the problem. break arr.extend(chunk) ## Your environment Aiohttp 1.1.5 Python 3.5.1 from PSF macOS Sierra 10.12.1
cookiecutter__cookiecutter-608
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.generate\n---------------------\n\nFunctions for generating a project from a project template.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\nimport fnmatch\nimport io\nimport json\nimport logging\nimport os\nimport shutil\n\nfrom jinja2 import FileSystemLoader, Template\nfrom jinja2.environment import Environment\nfrom jinja2.exceptions import TemplateSyntaxError\nfrom binaryornot.check import is_binary\n\nfrom .exceptions import (\n NonTemplatedInputDirException,\n ContextDecodingException,\n FailedHookException,\n OutputDirExistsException\n)\nfrom .find import find_template\nfrom .utils import make_sure_path_exists, work_in, rmtree\nfrom .hooks import run_hook\n\n\ndef copy_without_render(path, context):\n \"\"\"\n Returns True if `path` matches some pattern in the\n `_copy_without_render` context setting.\n\n :param path: A file-system path referring to a file or dir that\n should be rendered or just copied.\n :param context: cookiecutter context.\n \"\"\"\n try:\n for dont_render in context['cookiecutter']['_copy_without_render']:\n if fnmatch.fnmatch(path, dont_render):\n return True\n except KeyError:\n return False\n\n return False\n\n\ndef apply_overwrites_to_context(context, overwrite_context):\n \"\"\"Modify the given context in place based on the overwrite_context.\"\"\"\n for variable, overwrite in overwrite_context.items():\n if variable not in context:\n # Do not include variables which are not used in the template\n continue\n\n context_value = context[variable]\n\n if isinstance(context_value, list):\n # We are dealing with a choice variable\n if overwrite in context_value:\n # This overwrite is actually valid for the given context\n # Let's set it as default (by definition first item in list)\n # see ``cookiecutter.prompt.prompt_choice_for_config``\n context_value.remove(overwrite)\n context_value.insert(0, overwrite)\n else:\n # Simply overwrite the value for this variable\n context[variable] = overwrite\n\n\ndef generate_context(context_file='cookiecutter.json', default_context=None,\n extra_context=None):\n \"\"\"\n Generates the context for a Cookiecutter project template.\n Loads the JSON file as a Python object, with key being the JSON filename.\n\n :param context_file: JSON file containing key/value pairs for populating\n the cookiecutter's variables.\n :param default_context: Dictionary containing config to take into account.\n :param extra_context: Dictionary containing configuration overrides\n \"\"\"\n\n context = {}\n\n file_handle = open(context_file)\n try:\n obj = json.load(file_handle, object_pairs_hook=OrderedDict)\n except ValueError as e:\n # JSON decoding error. Let's throw a new exception that is more\n # friendly for the developer or user.\n full_fpath = os.path.abspath(context_file)\n json_exc_message = str(e)\n our_exc_message = (\n 'JSON decoding error while loading \"{0}\". 
Decoding'\n ' error details: \"{1}\"'.format(full_fpath, json_exc_message))\n raise ContextDecodingException(our_exc_message)\n\n # Add the Python object to the context dictionary\n file_name = os.path.split(context_file)[1]\n file_stem = file_name.split('.')[0]\n context[file_stem] = obj\n\n # Overwrite context variable defaults with the default context from the\n # user's global config, if available\n if default_context:\n apply_overwrites_to_context(obj, default_context)\n if extra_context:\n apply_overwrites_to_context(obj, extra_context)\n\n logging.debug('Context generated is {0}'.format(context))\n return context\n\n\ndef generate_file(project_dir, infile, context, env):\n \"\"\"\n 1. Render the filename of infile as the name of outfile.\n 2. Deal with infile appropriately:\n\n a. If infile is a binary file, copy it over without rendering.\n b. If infile is a text file, render its contents and write the\n rendered infile to outfile.\n\n Precondition:\n\n When calling `generate_file()`, the root template dir must be the\n current working directory. Using `utils.work_in()` is the recommended\n way to perform this directory change.\n\n :param project_dir: Absolute path to the resulting generated project.\n :param infile: Input file to generate the file from. Relative to the root\n template dir.\n :param context: Dict for populating the cookiecutter's variables.\n :param env: Jinja2 template execution environment.\n \"\"\"\n\n logging.debug('Generating file {0}'.format(infile))\n\n # Render the path to the output file (not including the root project dir)\n outfile_tmpl = Template(infile)\n\n outfile = os.path.join(project_dir, outfile_tmpl.render(**context))\n file_name_is_empty = os.path.isdir(outfile)\n if file_name_is_empty:\n logging.debug('The resulting file name is empty: {0}'.format(outfile))\n return\n\n logging.debug('outfile is {0}'.format(outfile))\n\n # Just copy over binary files. 
Don't render.\n logging.debug(\"Check {0} to see if it's a binary\".format(infile))\n if is_binary(infile):\n logging.debug('Copying binary {0} to {1} without rendering'\n .format(infile, outfile))\n shutil.copyfile(infile, outfile)\n else:\n # Force fwd slashes on Windows for get_template\n # This is a by-design Jinja issue\n infile_fwd_slashes = infile.replace(os.path.sep, '/')\n\n # Render the file\n try:\n tmpl = env.get_template(infile_fwd_slashes)\n except TemplateSyntaxError as exception:\n # Disable translated so that printed exception contains verbose\n # information about syntax error location\n exception.translated = False\n raise\n rendered_file = tmpl.render(**context)\n\n logging.debug('Writing {0}'.format(outfile))\n\n with io.open(outfile, 'w', encoding='utf-8') as fh:\n fh.write(rendered_file)\n\n # Apply file permissions to output file\n shutil.copymode(infile, outfile)\n\n\ndef render_and_create_dir(dirname, context, output_dir,\n overwrite_if_exists=False):\n \"\"\"\n Renders the name of a directory, creates the directory, and\n returns its path.\n \"\"\"\n\n name_tmpl = Template(dirname)\n rendered_dirname = name_tmpl.render(**context)\n logging.debug('Rendered dir {0} must exist in output_dir {1}'.format(\n rendered_dirname,\n output_dir\n ))\n dir_to_create = os.path.normpath(\n os.path.join(output_dir, rendered_dirname)\n )\n\n output_dir_exists = os.path.exists(dir_to_create)\n\n if overwrite_if_exists:\n if output_dir_exists:\n logging.debug('Output directory {} already exists,'\n 'overwriting it'.format(dir_to_create))\n else:\n if output_dir_exists:\n msg = 'Error: \"{}\" directory already exists'.format(dir_to_create)\n raise OutputDirExistsException(msg)\n\n make_sure_path_exists(dir_to_create)\n return dir_to_create\n\n\ndef ensure_dir_is_templated(dirname):\n \"\"\"\n Ensures that dirname is a templated directory name.\n \"\"\"\n if '{{' in dirname and '}}' in dirname:\n return True\n else:\n raise NonTemplatedInputDirException\n\n\ndef _run_hook_from_repo_dir(repo_dir, hook_name, project_dir, context):\n \"\"\"\n Run hook from repo directory, cleaning up project directory if hook fails\n \"\"\"\n with work_in(repo_dir):\n try:\n run_hook(hook_name, project_dir, context)\n except FailedHookException:\n rmtree(project_dir)\n logging.error(\"Stopping generation because %s\"\n \" hook script didn't exit sucessfully\" % hook_name)\n raise\n\n\ndef generate_files(repo_dir, context=None, output_dir='.',\n overwrite_if_exists=False):\n \"\"\"\n Renders the templates and saves them to files.\n\n :param repo_dir: Project template input directory.\n :param context: Dict for populating the template's variables.\n :param output_dir: Where to output the generated project dir into.\n :param overwrite_if_exists: Overwrite the contents of the output directory\n if it exists\n \"\"\"\n\n template_dir = find_template(repo_dir)\n logging.debug('Generating project from {0}...'.format(template_dir))\n context = context or {}\n\n unrendered_dir = os.path.split(template_dir)[1]\n ensure_dir_is_templated(unrendered_dir)\n project_dir = render_and_create_dir(unrendered_dir,\n context,\n output_dir,\n overwrite_if_exists)\n\n # We want the Jinja path and the OS paths to match. 
Consequently, we'll:\n # + CD to the template folder\n # + Set Jinja's path to '.'\n #\n # In order to build our files to the correct folder(s), we'll use an\n # absolute path for the target folder (project_dir)\n\n project_dir = os.path.abspath(project_dir)\n logging.debug('project_dir is {0}'.format(project_dir))\n\n _run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context)\n\n with work_in(template_dir):\n env = Environment(keep_trailing_newline=True)\n env.loader = FileSystemLoader('.')\n\n for root, dirs, files in os.walk('.'):\n # We must separate the two types of dirs into different lists.\n # The reason is that we don't want ``os.walk`` to go through the\n # unrendered directories, since they will just be copied.\n copy_dirs = []\n render_dirs = []\n\n for d in dirs:\n d_ = os.path.normpath(os.path.join(root, d))\n # We check the full path, because that's how it can be\n # specified in the ``_copy_without_render`` setting, but\n # we store just the dir name\n if copy_without_render(d_, context):\n copy_dirs.append(d)\n else:\n render_dirs.append(d)\n\n for copy_dir in copy_dirs:\n indir = os.path.normpath(os.path.join(root, copy_dir))\n outdir = os.path.normpath(os.path.join(project_dir, indir))\n logging.debug(\n 'Copying dir {0} to {1} without rendering'\n ''.format(indir, outdir)\n )\n shutil.copytree(indir, outdir)\n\n # We mutate ``dirs``, because we only want to go through these dirs\n # recursively\n dirs[:] = render_dirs\n for d in dirs:\n unrendered_dir = os.path.join(project_dir, root, d)\n render_and_create_dir(unrendered_dir, context, output_dir,\n overwrite_if_exists)\n\n for f in files:\n infile = os.path.normpath(os.path.join(root, f))\n if copy_without_render(infile, context):\n outfile_tmpl = Template(infile)\n outfile_rendered = outfile_tmpl.render(**context)\n outfile = os.path.join(project_dir, outfile_rendered)\n logging.debug(\n 'Copying file {0} to {1} without rendering'\n ''.format(infile, outfile)\n )\n shutil.copyfile(infile, outfile)\n shutil.copymode(infile, outfile)\n continue\n logging.debug('f is {0}'.format(f))\n generate_file(project_dir, infile, context, env)\n\n _run_hook_from_repo_dir(repo_dir, 'post_gen_project', project_dir, context)\n\n return project_dir\n", "path": "cookiecutter/generate.py" } ]
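As a quick illustration of `apply_overwrites_to_context` above (a sketch; the `license` variable is made up): an overwrite for a choice variable is hoisted to the front of the list, making it the default, while variables not present in the context are ignored.

```
from collections import OrderedDict

from cookiecutter.generate import apply_overwrites_to_context

context = OrderedDict([('license', ['MIT', 'BSD', 'GPL'])])
apply_overwrites_to_context(context, {'license': 'GPL', 'unknown': 'ignored'})
print(context['license'])  # ['GPL', 'MIT', 'BSD'] -- 'GPL' is now the default
```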
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.generate\n---------------------\n\nFunctions for generating a project from a project template.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\nimport fnmatch\nimport io\nimport json\nimport logging\nimport os\nimport shutil\n\nfrom jinja2 import FileSystemLoader, Template\nfrom jinja2.environment import Environment\nfrom jinja2.exceptions import TemplateSyntaxError\nfrom binaryornot.check import is_binary\n\nfrom .exceptions import (\n NonTemplatedInputDirException,\n ContextDecodingException,\n FailedHookException,\n OutputDirExistsException\n)\nfrom .find import find_template\nfrom .utils import make_sure_path_exists, work_in, rmtree\nfrom .hooks import run_hook\n\n\ndef copy_without_render(path, context):\n \"\"\"\n Returns True if `path` matches some pattern in the\n `_copy_without_render` context setting.\n\n :param path: A file-system path referring to a file or dir that\n should be rendered or just copied.\n :param context: cookiecutter context.\n \"\"\"\n try:\n for dont_render in context['cookiecutter']['_copy_without_render']:\n if fnmatch.fnmatch(path, dont_render):\n return True\n except KeyError:\n return False\n\n return False\n\n\ndef apply_overwrites_to_context(context, overwrite_context):\n \"\"\"Modify the given context in place based on the overwrite_context.\"\"\"\n for variable, overwrite in overwrite_context.items():\n if variable not in context:\n # Do not include variables which are not used in the template\n continue\n\n context_value = context[variable]\n\n if isinstance(context_value, list):\n # We are dealing with a choice variable\n if overwrite in context_value:\n # This overwrite is actually valid for the given context\n # Let's set it as default (by definition first item in list)\n # see ``cookiecutter.prompt.prompt_choice_for_config``\n context_value.remove(overwrite)\n context_value.insert(0, overwrite)\n else:\n # Simply overwrite the value for this variable\n context[variable] = overwrite\n\n\ndef generate_context(context_file='cookiecutter.json', default_context=None,\n extra_context=None):\n \"\"\"\n Generates the context for a Cookiecutter project template.\n Loads the JSON file as a Python object, with key being the JSON filename.\n\n :param context_file: JSON file containing key/value pairs for populating\n the cookiecutter's variables.\n :param default_context: Dictionary containing config to take into account.\n :param extra_context: Dictionary containing configuration overrides\n \"\"\"\n\n context = {}\n\n try:\n with open(context_file) as file_handle:\n obj = json.load(file_handle, object_pairs_hook=OrderedDict)\n except ValueError as e:\n # JSON decoding error. Let's throw a new exception that is more\n # friendly for the developer or user.\n full_fpath = os.path.abspath(context_file)\n json_exc_message = str(e)\n our_exc_message = (\n 'JSON decoding error while loading \"{0}\". 
Decoding'\n ' error details: \"{1}\"'.format(full_fpath, json_exc_message))\n raise ContextDecodingException(our_exc_message)\n\n # Add the Python object to the context dictionary\n file_name = os.path.split(context_file)[1]\n file_stem = file_name.split('.')[0]\n context[file_stem] = obj\n\n # Overwrite context variable defaults with the default context from the\n # user's global config, if available\n if default_context:\n apply_overwrites_to_context(obj, default_context)\n if extra_context:\n apply_overwrites_to_context(obj, extra_context)\n\n logging.debug('Context generated is {0}'.format(context))\n return context\n\n\ndef generate_file(project_dir, infile, context, env):\n \"\"\"\n 1. Render the filename of infile as the name of outfile.\n 2. Deal with infile appropriately:\n\n a. If infile is a binary file, copy it over without rendering.\n b. If infile is a text file, render its contents and write the\n rendered infile to outfile.\n\n Precondition:\n\n When calling `generate_file()`, the root template dir must be the\n current working directory. Using `utils.work_in()` is the recommended\n way to perform this directory change.\n\n :param project_dir: Absolute path to the resulting generated project.\n :param infile: Input file to generate the file from. Relative to the root\n template dir.\n :param context: Dict for populating the cookiecutter's variables.\n :param env: Jinja2 template execution environment.\n \"\"\"\n\n logging.debug('Generating file {0}'.format(infile))\n\n # Render the path to the output file (not including the root project dir)\n outfile_tmpl = Template(infile)\n\n outfile = os.path.join(project_dir, outfile_tmpl.render(**context))\n file_name_is_empty = os.path.isdir(outfile)\n if file_name_is_empty:\n logging.debug('The resulting file name is empty: {0}'.format(outfile))\n return\n\n logging.debug('outfile is {0}'.format(outfile))\n\n # Just copy over binary files. 
Don't render.\n logging.debug(\"Check {0} to see if it's a binary\".format(infile))\n if is_binary(infile):\n logging.debug('Copying binary {0} to {1} without rendering'\n .format(infile, outfile))\n shutil.copyfile(infile, outfile)\n else:\n # Force fwd slashes on Windows for get_template\n # This is a by-design Jinja issue\n infile_fwd_slashes = infile.replace(os.path.sep, '/')\n\n # Render the file\n try:\n tmpl = env.get_template(infile_fwd_slashes)\n except TemplateSyntaxError as exception:\n # Disable translated so that printed exception contains verbose\n # information about syntax error location\n exception.translated = False\n raise\n rendered_file = tmpl.render(**context)\n\n logging.debug('Writing {0}'.format(outfile))\n\n with io.open(outfile, 'w', encoding='utf-8') as fh:\n fh.write(rendered_file)\n\n # Apply file permissions to output file\n shutil.copymode(infile, outfile)\n\n\ndef render_and_create_dir(dirname, context, output_dir,\n overwrite_if_exists=False):\n \"\"\"\n Renders the name of a directory, creates the directory, and\n returns its path.\n \"\"\"\n\n name_tmpl = Template(dirname)\n rendered_dirname = name_tmpl.render(**context)\n logging.debug('Rendered dir {0} must exist in output_dir {1}'.format(\n rendered_dirname,\n output_dir\n ))\n dir_to_create = os.path.normpath(\n os.path.join(output_dir, rendered_dirname)\n )\n\n output_dir_exists = os.path.exists(dir_to_create)\n\n if overwrite_if_exists:\n if output_dir_exists:\n logging.debug('Output directory {} already exists,'\n 'overwriting it'.format(dir_to_create))\n else:\n if output_dir_exists:\n msg = 'Error: \"{}\" directory already exists'.format(dir_to_create)\n raise OutputDirExistsException(msg)\n\n make_sure_path_exists(dir_to_create)\n return dir_to_create\n\n\ndef ensure_dir_is_templated(dirname):\n \"\"\"\n Ensures that dirname is a templated directory name.\n \"\"\"\n if '{{' in dirname and '}}' in dirname:\n return True\n else:\n raise NonTemplatedInputDirException\n\n\ndef _run_hook_from_repo_dir(repo_dir, hook_name, project_dir, context):\n \"\"\"\n Run hook from repo directory, cleaning up project directory if hook fails\n \"\"\"\n with work_in(repo_dir):\n try:\n run_hook(hook_name, project_dir, context)\n except FailedHookException:\n rmtree(project_dir)\n logging.error(\"Stopping generation because %s\"\n \" hook script didn't exit sucessfully\" % hook_name)\n raise\n\n\ndef generate_files(repo_dir, context=None, output_dir='.',\n overwrite_if_exists=False):\n \"\"\"\n Renders the templates and saves them to files.\n\n :param repo_dir: Project template input directory.\n :param context: Dict for populating the template's variables.\n :param output_dir: Where to output the generated project dir into.\n :param overwrite_if_exists: Overwrite the contents of the output directory\n if it exists\n \"\"\"\n\n template_dir = find_template(repo_dir)\n logging.debug('Generating project from {0}...'.format(template_dir))\n context = context or {}\n\n unrendered_dir = os.path.split(template_dir)[1]\n ensure_dir_is_templated(unrendered_dir)\n project_dir = render_and_create_dir(unrendered_dir,\n context,\n output_dir,\n overwrite_if_exists)\n\n # We want the Jinja path and the OS paths to match. 
Consequently, we'll:\n # + CD to the template folder\n # + Set Jinja's path to '.'\n #\n # In order to build our files to the correct folder(s), we'll use an\n # absolute path for the target folder (project_dir)\n\n project_dir = os.path.abspath(project_dir)\n logging.debug('project_dir is {0}'.format(project_dir))\n\n _run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context)\n\n with work_in(template_dir):\n env = Environment(keep_trailing_newline=True)\n env.loader = FileSystemLoader('.')\n\n for root, dirs, files in os.walk('.'):\n # We must separate the two types of dirs into different lists.\n # The reason is that we don't want ``os.walk`` to go through the\n # unrendered directories, since they will just be copied.\n copy_dirs = []\n render_dirs = []\n\n for d in dirs:\n d_ = os.path.normpath(os.path.join(root, d))\n # We check the full path, because that's how it can be\n # specified in the ``_copy_without_render`` setting, but\n # we store just the dir name\n if copy_without_render(d_, context):\n copy_dirs.append(d)\n else:\n render_dirs.append(d)\n\n for copy_dir in copy_dirs:\n indir = os.path.normpath(os.path.join(root, copy_dir))\n outdir = os.path.normpath(os.path.join(project_dir, indir))\n logging.debug(\n 'Copying dir {0} to {1} without rendering'\n ''.format(indir, outdir)\n )\n shutil.copytree(indir, outdir)\n\n # We mutate ``dirs``, because we only want to go through these dirs\n # recursively\n dirs[:] = render_dirs\n for d in dirs:\n unrendered_dir = os.path.join(project_dir, root, d)\n render_and_create_dir(unrendered_dir, context, output_dir,\n overwrite_if_exists)\n\n for f in files:\n infile = os.path.normpath(os.path.join(root, f))\n if copy_without_render(infile, context):\n outfile_tmpl = Template(infile)\n outfile_rendered = outfile_tmpl.render(**context)\n outfile = os.path.join(project_dir, outfile_rendered)\n logging.debug(\n 'Copying file {0} to {1} without rendering'\n ''.format(infile, outfile)\n )\n shutil.copyfile(infile, outfile)\n shutil.copymode(infile, outfile)\n continue\n logging.debug('f is {0}'.format(f))\n generate_file(project_dir, infile, context, env)\n\n _run_hook_from_repo_dir(repo_dir, 'post_gen_project', project_dir, context)\n\n return project_dir\n", "path": "cookiecutter/generate.py" } ]
diff --git a/cookiecutter/generate.py b/cookiecutter/generate.py index 7a4d4619c..aa9873434 100755 --- a/cookiecutter/generate.py +++ b/cookiecutter/generate.py @@ -87,9 +87,9 @@ def generate_context(context_file='cookiecutter.json', default_context=None, context = {} - file_handle = open(context_file) try: - obj = json.load(file_handle, object_pairs_hook=OrderedDict) + with open(context_file) as file_handle: + obj = json.load(file_handle, object_pairs_hook=OrderedDict) except ValueError as e: # JSON decoding error. Let's throw a new exception that is more # friendly for the developer or user.
file_handle.close() is never called -- and it causes a bug!!

In https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/generate.py#L90, `file_handle.close()` is never called, and there's no need for it to remain open.

This is the first time in over 10 years of Python programming that I've run into an actual issue with file handles left open, so I felt that the use of two exclamation points in the subject was warranted. I'm removing a temporary template after an unrelated error, and...

```
  File "D:\anaconda32\lib\shutil.py", line 250, in rmtree
    os.remove(fullname)
WindowsError: [Error 32] The process cannot access the file because it is being used by another process: '.\\tmpp2duu1\\cookiecutter.json'
```

This change in generate.py:90 keeps Python from stumbling on this particular issue:

```
try:
    with open(context_file) as file_handle:
        obj = json.load(file_handle, object_pairs_hook=OrderedDict)
except ValueError as e:
    # ...
```
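The failure mode is easy to reproduce in isolation. A minimal sketch (paths and file contents are made up; the `rmtree` failure itself only manifests on Windows, where deleting an open file is refused):

```
import json
import os
import shutil
import tempfile

tmp = tempfile.mkdtemp()
path = os.path.join(tmp, 'cookiecutter.json')
with open(path, 'w') as fh:
    fh.write('{not valid json}')

try:
    # The context manager closes the handle even when json.load() raises,
    # so the rmtree() below no longer hits "file in use" on Windows.
    with open(path) as file_handle:
        json.load(file_handle)
except ValueError:
    shutil.rmtree(tmp)
```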
openai__gym-1730
[ { "content": "import numpy as np\n\nfrom .space import Space\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n \n There are two common use cases:\n \n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n \n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n if shape is None:\n assert low.shape == high.shape, 'box dimension mismatch. '\n self.shape = low.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '\n self.shape = tuple(shape)\n self.low = np.full(self.shape, low)\n self.high = np.full(self.shape, high)\n\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box. 
\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n \n * [a, b] : uniform distribution \n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n \n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n \n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) - self.high[upp_bounded]\n \n sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n high=high[bounded],\n size=bounded[bounded].shape)\n\n return sample.astype(self.dtype)\n \n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n", "path": "gym/spaces/box.py" } ]
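To make the interval taxonomy in the docstring concrete, here is a small sketch mixing bounded, half-bounded and unbounded coordinates in one `Box` (the bound values are chosen arbitrarily):

```
import numpy as np

from gym.spaces import Box

space = Box(low=np.array([0.0, -np.inf, 2.0]),
            high=np.array([1.0, np.inf, np.inf]),
            dtype=np.float32)
print(space.is_bounded('both'))  # False -- not every coordinate is bounded
print(space.sample())            # uniform, normal and shifted-exponential draws
```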
[ { "content": "import numpy as np\n\nfrom .space import Space\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n \n There are two common use cases:\n \n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n \n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n if shape is None:\n assert low.shape == high.shape, 'box dimension mismatch. '\n self.shape = low.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '\n self.shape = tuple(shape)\n self.low = np.full(self.shape, low)\n self.high = np.full(self.shape, high)\n\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box. 
\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n \n * [a, b] : uniform distribution \n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n \n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n \n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) - self.high[upp_bounded]\n \n sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n high=high[bounded],\n size=bounded[bounded].shape)\n if self.dtype.kind == 'i':\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n \n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n", "path": "gym/spaces/box.py" } ]
diff --git a/gym/spaces/box.py b/gym/spaces/box.py index e137b807c1e..5b3edf15b15 100644 --- a/gym/spaces/box.py +++ b/gym/spaces/box.py @@ -93,6 +93,8 @@ def sample(self): sample[bounded] = self.np_random.uniform(low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape) + if self.dtype.kind == 'i': + sample = np.floor(sample) return sample.astype(self.dtype)
Sampling Bug
Gym Version: 0.15.3
issue: Box samples numbers above the `high` parameter.

```
from gym.spaces import Box
observation_space = Box(low=-3, high=-1, shape=(9,), dtype='int')
print(observation_space.sample())
>> [ 0 -2 0 -2 0 -1 0 -2 0]
```

The current implementation samples floats from a uniform distribution over [`low`, `high`] and then converts the resulting samples to the desired `dtype`. This runs into the problem of sampling the `low` parameter very rarely (and not uniformly) when `dtype` is `int`, because converting the floats back to int truncates toward zero, which acts as a ceil operation on negative numbers; i.e. in the above example -3 is almost never sampled, as most of the low sampled floats like -2.85 or -2.9 get converted to -2.

https://github.com/openai/gym/blob/0cd9266d986d470ed9c0dd87a41cd680b65cfe1c/gym/spaces/box.py#L93-L97
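The truncation-toward-zero behaviour the report describes is easy to observe directly, and flooring first (as the patch does) restores uniformity. A small sketch with a made-up sample size:

```
import numpy as np

# Floats drawn the way Box does for ints: uniform over [low, high + 1).
samples = np.random.uniform(low=-3, high=-1 + 1, size=100000)

truncated = samples.astype('int64')           # truncates toward zero
floored = np.floor(samples).astype('int64')   # what the patch does

print(np.unique(truncated))  # typically [-2 -1  0] -- 0 is out of range, -3 missing
print(np.unique(floored))    # [-3 -2 -1] -- uniform over the intended values
```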
urllib3__urllib3-1304
[ { "content": "from __future__ import absolute_import\nimport datetime\nimport logging\nimport os\nimport sys\nimport socket\nfrom socket import error as SocketError, timeout as SocketTimeout\nimport warnings\nfrom .packages import six\nfrom .packages.six.moves.http_client import HTTPConnection as _HTTPConnection\nfrom .packages.six.moves.http_client import HTTPException # noqa: F401\n\ntry: # Compiled with SSL?\n import ssl\n BaseSSLError = ssl.SSLError\nexcept (ImportError, AttributeError): # Platform-specific: No SSL.\n ssl = None\n\n class BaseSSLError(BaseException):\n pass\n\n\ntry: # Python 3:\n # Not a no-op, we're adding this to the namespace so it can be imported.\n ConnectionError = ConnectionError\nexcept NameError: # Python 2:\n class ConnectionError(Exception):\n pass\n\n\nfrom .exceptions import (\n NewConnectionError,\n ConnectTimeoutError,\n SubjectAltNameWarning,\n SystemTimeWarning,\n)\nfrom .packages.ssl_match_hostname import match_hostname, CertificateError\n\nfrom .util.ssl_ import (\n resolve_cert_reqs,\n resolve_ssl_version,\n assert_fingerprint,\n create_urllib3_context,\n ssl_wrap_socket\n)\n\n\nfrom .util import connection\n\nfrom ._collections import HTTPHeaderDict\n\nlog = logging.getLogger(__name__)\n\nport_by_scheme = {\n 'http': 80,\n 'https': 443,\n}\n\n# When updating RECENT_DATE, move it to\n# within two years of the current date, and no\n# earlier than 6 months ago.\nRECENT_DATE = datetime.date(2016, 1, 1)\n\n\nclass DummyConnection(object):\n \"\"\"Used to detect a failed ConnectionCls import.\"\"\"\n pass\n\n\nclass HTTPConnection(_HTTPConnection, object):\n \"\"\"\n Based on httplib.HTTPConnection but provides an extra constructor\n backwards-compatibility layer between older and newer Pythons.\n\n Additional keyword parameters are used to configure attributes of the connection.\n Accepted parameters include:\n\n - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`\n - ``source_address``: Set the source address for the current connection.\n\n .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x\n\n - ``socket_options``: Set specific options on the underlying socket. If not specified, then\n defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling\n Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.\n\n For example, if you wish to enable TCP Keep Alive in addition to the defaults,\n you might pass::\n\n HTTPConnection.default_socket_options + [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n ]\n\n Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).\n \"\"\"\n\n default_port = port_by_scheme['http']\n\n #: Disable Nagle's algorithm by default.\n #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``\n default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]\n\n #: Whether this connection verifies the host's certificate.\n is_verified = False\n\n def __init__(self, *args, **kw):\n if six.PY3: # Python 3\n kw.pop('strict', None)\n\n # Pre-set source_address in case we have an older Python like 2.6.\n self.source_address = kw.get('source_address')\n\n if sys.version_info < (2, 7): # Python 2.6\n # _HTTPConnection on Python 2.6 will balk at this keyword arg, but\n # not newer versions. We can still use it when creating a\n # connection though, so we pop it *after* we have saved it as\n # self.source_address.\n kw.pop('source_address', None)\n\n #: The socket options provided by the user. 
If no options are\n #: provided, we use the default options.\n self.socket_options = kw.pop('socket_options', self.default_socket_options)\n\n # Superclass also sets self.source_address in Python 2.7+.\n _HTTPConnection.__init__(self, *args, **kw)\n\n @property\n def host(self):\n \"\"\"\n Getter method to remove any trailing dots that indicate the hostname is an FQDN.\n\n In general, SSL certificates don't include the trailing dot indicating a\n fully-qualified domain name, and thus, they don't validate properly when\n checked against a domain name that includes the dot. In addition, some\n servers may not expect to receive the trailing dot when provided.\n\n However, the hostname with trailing dot is critical to DNS resolution; doing a\n lookup with the trailing dot will properly only resolve the appropriate FQDN,\n whereas a lookup without a trailing dot will search the system's search domain\n list. Thus, it's important to keep the original host around for use only in\n those cases where it's appropriate (i.e., when doing DNS lookup to establish the\n actual TCP connection across which we're going to send HTTP requests).\n \"\"\"\n return self._dns_host.rstrip('.')\n\n @host.setter\n def host(self, value):\n \"\"\"\n Setter for the `host` property.\n\n We assume that only urllib3 uses the _dns_host attribute; httplib itself\n only uses `host`, and it seems reasonable that other libraries follow suit.\n \"\"\"\n self._dns_host = value\n\n def _new_conn(self):\n \"\"\" Establish a socket connection and set nodelay settings on it.\n\n :return: New socket connection.\n \"\"\"\n extra_kw = {}\n if self.source_address:\n extra_kw['source_address'] = self.source_address\n\n if self.socket_options:\n extra_kw['socket_options'] = self.socket_options\n\n try:\n conn = connection.create_connection(\n (self._dns_host, self.port), self.timeout, **extra_kw)\n\n except SocketTimeout as e:\n raise ConnectTimeoutError(\n self, \"Connection to %s timed out. 
(connect timeout=%s)\" %\n (self.host, self.timeout))\n\n except SocketError as e:\n raise NewConnectionError(\n self, \"Failed to establish a new connection: %s\" % e)\n\n return conn\n\n def _prepare_conn(self, conn):\n self.sock = conn\n # the _tunnel_host attribute was added in python 2.6.3 (via\n # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do\n # not have them.\n if getattr(self, '_tunnel_host', None):\n # TODO: Fix tunnel so it doesn't depend on self.sock state.\n self._tunnel()\n # Mark this connection as not reusable\n self.auto_open = 0\n\n def connect(self):\n conn = self._new_conn()\n self._prepare_conn(conn)\n\n def request_chunked(self, method, url, body=None, headers=None):\n \"\"\"\n Alternative to the common request method, which sends the\n body with chunked encoding and not as one block\n \"\"\"\n headers = HTTPHeaderDict(headers if headers is not None else {})\n skip_accept_encoding = 'accept-encoding' in headers\n skip_host = 'host' in headers\n self.putrequest(\n method,\n url,\n skip_accept_encoding=skip_accept_encoding,\n skip_host=skip_host\n )\n for header, value in headers.items():\n self.putheader(header, value)\n if 'transfer-encoding' not in headers:\n self.putheader('Transfer-Encoding', 'chunked')\n self.endheaders()\n\n if body is not None:\n stringish_types = six.string_types + (six.binary_type,)\n if isinstance(body, stringish_types):\n body = (body,)\n for chunk in body:\n if not chunk:\n continue\n if not isinstance(chunk, six.binary_type):\n chunk = chunk.encode('utf8')\n len_str = hex(len(chunk))[2:]\n self.send(len_str.encode('utf-8'))\n self.send(b'\\r\\n')\n self.send(chunk)\n self.send(b'\\r\\n')\n\n # After the if clause, to always have a closed body\n self.send(b'0\\r\\n\\r\\n')\n\n\nclass HTTPSConnection(HTTPConnection):\n default_port = port_by_scheme['https']\n\n ssl_version = None\n\n def __init__(self, host, port=None, key_file=None, cert_file=None,\n strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\n ssl_context=None, **kw):\n\n HTTPConnection.__init__(self, host, port, strict=strict,\n timeout=timeout, **kw)\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.ssl_context = ssl_context\n\n # Required property for Google AppEngine 1.9.0 which otherwise causes\n # HTTPS requests to go out as HTTP. (See Issue #356)\n self._protocol = 'https'\n\n def connect(self):\n conn = self._new_conn()\n self._prepare_conn(conn)\n\n if self.ssl_context is None:\n self.ssl_context = create_urllib3_context(\n ssl_version=resolve_ssl_version(None),\n cert_reqs=resolve_cert_reqs(None),\n )\n\n self.sock = ssl_wrap_socket(\n sock=conn,\n keyfile=self.key_file,\n certfile=self.cert_file,\n ssl_context=self.ssl_context,\n )\n\n\nclass VerifiedHTTPSConnection(HTTPSConnection):\n \"\"\"\n Based on httplib.HTTPSConnection but wraps the socket with\n SSL certification.\n \"\"\"\n cert_reqs = None\n ca_certs = None\n ca_cert_dir = None\n ssl_version = None\n assert_fingerprint = None\n\n def set_cert(self, key_file=None, cert_file=None,\n cert_reqs=None, ca_certs=None,\n assert_hostname=None, assert_fingerprint=None,\n ca_cert_dir=None):\n \"\"\"\n This method should only be called once, before the connection is used.\n \"\"\"\n # If cert_reqs is not provided, we can try to guess. 
If the user gave\n # us a cert database, we assume they want to use it: otherwise, if\n # they gave us an SSL Context object we should use whatever is set for\n # it.\n if cert_reqs is None:\n if ca_certs or ca_cert_dir:\n cert_reqs = 'CERT_REQUIRED'\n elif self.ssl_context is not None:\n cert_reqs = self.ssl_context.verify_mode\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n self.ca_certs = ca_certs and os.path.expanduser(ca_certs)\n self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)\n\n def connect(self):\n # Add certificate verification\n conn = self._new_conn()\n\n hostname = self.host\n if getattr(self, '_tunnel_host', None):\n # _tunnel_host was added in Python 2.6.3\n # (See: http://hg.python.org/cpython/rev/0f57b30a152f)\n\n self.sock = conn\n # Calls self._set_hostport(), so self.host is\n # self._tunnel_host below.\n self._tunnel()\n # Mark this connection as not reusable\n self.auto_open = 0\n\n # Override the host with the one we're requesting data from.\n hostname = self._tunnel_host\n\n is_time_off = datetime.date.today() < RECENT_DATE\n if is_time_off:\n warnings.warn((\n 'System time is way off (before {0}). This will probably '\n 'lead to SSL verification errors').format(RECENT_DATE),\n SystemTimeWarning\n )\n\n # Wrap socket using verification with the root certs in\n # trusted_root_certs\n if self.ssl_context is None:\n self.ssl_context = create_urllib3_context(\n ssl_version=resolve_ssl_version(self.ssl_version),\n cert_reqs=resolve_cert_reqs(self.cert_reqs),\n )\n\n context = self.ssl_context\n context.verify_mode = resolve_cert_reqs(self.cert_reqs)\n self.sock = ssl_wrap_socket(\n sock=conn,\n keyfile=self.key_file,\n certfile=self.cert_file,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n server_hostname=hostname,\n ssl_context=context)\n\n if self.assert_fingerprint:\n assert_fingerprint(self.sock.getpeercert(binary_form=True),\n self.assert_fingerprint)\n elif context.verify_mode != ssl.CERT_NONE \\\n and not getattr(context, 'check_hostname', False) \\\n and self.assert_hostname is not False:\n # While urllib3 attempts to always turn off hostname matching from\n # the TLS library, this cannot always be done. So we check whether\n # the TLS Library still thinks it's matching hostnames.\n cert = self.sock.getpeercert()\n if not cert.get('subjectAltName', ()):\n warnings.warn((\n 'Certificate for {0} has no `subjectAltName`, falling back to check for a '\n '`commonName` for now. This feature is being removed by major browsers and '\n 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '\n 'for details.)'.format(hostname)),\n SubjectAltNameWarning\n )\n _match_hostname(cert, self.assert_hostname or hostname)\n\n self.is_verified = (\n context.verify_mode == ssl.CERT_REQUIRED or\n self.assert_fingerprint is not None\n )\n\n\ndef _match_hostname(cert, asserted_hostname):\n try:\n match_hostname(cert, asserted_hostname)\n except CertificateError as e:\n log.error(\n 'Certificate did not match expected hostname: %s. 
'\n 'Certificate: %s', asserted_hostname, cert\n )\n # Add cert to exception and reraise so client code can inspect\n # the cert when catching the exception, if they want to\n e._peer_cert = cert\n raise\n\n\nif ssl:\n # Make a copy for testing.\n UnverifiedHTTPSConnection = HTTPSConnection\n HTTPSConnection = VerifiedHTTPSConnection\nelse:\n HTTPSConnection = DummyConnection\n", "path": "urllib3/connection.py" } ]
[ { "content": "from __future__ import absolute_import\nimport datetime\nimport logging\nimport os\nimport sys\nimport socket\nfrom socket import error as SocketError, timeout as SocketTimeout\nimport warnings\nfrom .packages import six\nfrom .packages.six.moves.http_client import HTTPConnection as _HTTPConnection\nfrom .packages.six.moves.http_client import HTTPException # noqa: F401\n\ntry: # Compiled with SSL?\n import ssl\n BaseSSLError = ssl.SSLError\nexcept (ImportError, AttributeError): # Platform-specific: No SSL.\n ssl = None\n\n class BaseSSLError(BaseException):\n pass\n\n\ntry: # Python 3:\n # Not a no-op, we're adding this to the namespace so it can be imported.\n ConnectionError = ConnectionError\nexcept NameError: # Python 2:\n class ConnectionError(Exception):\n pass\n\n\nfrom .exceptions import (\n NewConnectionError,\n ConnectTimeoutError,\n SubjectAltNameWarning,\n SystemTimeWarning,\n)\nfrom .packages.ssl_match_hostname import match_hostname, CertificateError\n\nfrom .util.ssl_ import (\n resolve_cert_reqs,\n resolve_ssl_version,\n assert_fingerprint,\n create_urllib3_context,\n ssl_wrap_socket\n)\n\n\nfrom .util import connection\n\nfrom ._collections import HTTPHeaderDict\n\nlog = logging.getLogger(__name__)\n\nport_by_scheme = {\n 'http': 80,\n 'https': 443,\n}\n\n# When updating RECENT_DATE, move it to within two years of the current date,\n# and not less than 6 months ago.\n# Example: if Today is 2018-01-01, then RECENT_DATE should be any date on or\n# after 2016-01-01 (today - 2 years) AND before 2017-07-01 (today - 6 months)\nRECENT_DATE = datetime.date(2017, 6, 30)\n\n\nclass DummyConnection(object):\n \"\"\"Used to detect a failed ConnectionCls import.\"\"\"\n pass\n\n\nclass HTTPConnection(_HTTPConnection, object):\n \"\"\"\n Based on httplib.HTTPConnection but provides an extra constructor\n backwards-compatibility layer between older and newer Pythons.\n\n Additional keyword parameters are used to configure attributes of the connection.\n Accepted parameters include:\n\n - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`\n - ``source_address``: Set the source address for the current connection.\n\n .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x\n\n - ``socket_options``: Set specific options on the underlying socket. If not specified, then\n defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling\n Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.\n\n For example, if you wish to enable TCP Keep Alive in addition to the defaults,\n you might pass::\n\n HTTPConnection.default_socket_options + [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n ]\n\n Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).\n \"\"\"\n\n default_port = port_by_scheme['http']\n\n #: Disable Nagle's algorithm by default.\n #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``\n default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]\n\n #: Whether this connection verifies the host's certificate.\n is_verified = False\n\n def __init__(self, *args, **kw):\n if six.PY3: # Python 3\n kw.pop('strict', None)\n\n # Pre-set source_address in case we have an older Python like 2.6.\n self.source_address = kw.get('source_address')\n\n if sys.version_info < (2, 7): # Python 2.6\n # _HTTPConnection on Python 2.6 will balk at this keyword arg, but\n # not newer versions. 
We can still use it when creating a\n # connection though, so we pop it *after* we have saved it as\n # self.source_address.\n kw.pop('source_address', None)\n\n #: The socket options provided by the user. If no options are\n #: provided, we use the default options.\n self.socket_options = kw.pop('socket_options', self.default_socket_options)\n\n # Superclass also sets self.source_address in Python 2.7+.\n _HTTPConnection.__init__(self, *args, **kw)\n\n @property\n def host(self):\n \"\"\"\n Getter method to remove any trailing dots that indicate the hostname is an FQDN.\n\n In general, SSL certificates don't include the trailing dot indicating a\n fully-qualified domain name, and thus, they don't validate properly when\n checked against a domain name that includes the dot. In addition, some\n servers may not expect to receive the trailing dot when provided.\n\n However, the hostname with trailing dot is critical to DNS resolution; doing a\n lookup with the trailing dot will properly only resolve the appropriate FQDN,\n whereas a lookup without a trailing dot will search the system's search domain\n list. Thus, it's important to keep the original host around for use only in\n those cases where it's appropriate (i.e., when doing DNS lookup to establish the\n actual TCP connection across which we're going to send HTTP requests).\n \"\"\"\n return self._dns_host.rstrip('.')\n\n @host.setter\n def host(self, value):\n \"\"\"\n Setter for the `host` property.\n\n We assume that only urllib3 uses the _dns_host attribute; httplib itself\n only uses `host`, and it seems reasonable that other libraries follow suit.\n \"\"\"\n self._dns_host = value\n\n def _new_conn(self):\n \"\"\" Establish a socket connection and set nodelay settings on it.\n\n :return: New socket connection.\n \"\"\"\n extra_kw = {}\n if self.source_address:\n extra_kw['source_address'] = self.source_address\n\n if self.socket_options:\n extra_kw['socket_options'] = self.socket_options\n\n try:\n conn = connection.create_connection(\n (self._dns_host, self.port), self.timeout, **extra_kw)\n\n except SocketTimeout as e:\n raise ConnectTimeoutError(\n self, \"Connection to %s timed out. 
(connect timeout=%s)\" %\n (self.host, self.timeout))\n\n except SocketError as e:\n raise NewConnectionError(\n self, \"Failed to establish a new connection: %s\" % e)\n\n return conn\n\n def _prepare_conn(self, conn):\n self.sock = conn\n # the _tunnel_host attribute was added in python 2.6.3 (via\n # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do\n # not have them.\n if getattr(self, '_tunnel_host', None):\n # TODO: Fix tunnel so it doesn't depend on self.sock state.\n self._tunnel()\n # Mark this connection as not reusable\n self.auto_open = 0\n\n def connect(self):\n conn = self._new_conn()\n self._prepare_conn(conn)\n\n def request_chunked(self, method, url, body=None, headers=None):\n \"\"\"\n Alternative to the common request method, which sends the\n body with chunked encoding and not as one block\n \"\"\"\n headers = HTTPHeaderDict(headers if headers is not None else {})\n skip_accept_encoding = 'accept-encoding' in headers\n skip_host = 'host' in headers\n self.putrequest(\n method,\n url,\n skip_accept_encoding=skip_accept_encoding,\n skip_host=skip_host\n )\n for header, value in headers.items():\n self.putheader(header, value)\n if 'transfer-encoding' not in headers:\n self.putheader('Transfer-Encoding', 'chunked')\n self.endheaders()\n\n if body is not None:\n stringish_types = six.string_types + (six.binary_type,)\n if isinstance(body, stringish_types):\n body = (body,)\n for chunk in body:\n if not chunk:\n continue\n if not isinstance(chunk, six.binary_type):\n chunk = chunk.encode('utf8')\n len_str = hex(len(chunk))[2:]\n self.send(len_str.encode('utf-8'))\n self.send(b'\\r\\n')\n self.send(chunk)\n self.send(b'\\r\\n')\n\n # After the if clause, to always have a closed body\n self.send(b'0\\r\\n\\r\\n')\n\n\nclass HTTPSConnection(HTTPConnection):\n default_port = port_by_scheme['https']\n\n ssl_version = None\n\n def __init__(self, host, port=None, key_file=None, cert_file=None,\n strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\n ssl_context=None, **kw):\n\n HTTPConnection.__init__(self, host, port, strict=strict,\n timeout=timeout, **kw)\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.ssl_context = ssl_context\n\n # Required property for Google AppEngine 1.9.0 which otherwise causes\n # HTTPS requests to go out as HTTP. (See Issue #356)\n self._protocol = 'https'\n\n def connect(self):\n conn = self._new_conn()\n self._prepare_conn(conn)\n\n if self.ssl_context is None:\n self.ssl_context = create_urllib3_context(\n ssl_version=resolve_ssl_version(None),\n cert_reqs=resolve_cert_reqs(None),\n )\n\n self.sock = ssl_wrap_socket(\n sock=conn,\n keyfile=self.key_file,\n certfile=self.cert_file,\n ssl_context=self.ssl_context,\n )\n\n\nclass VerifiedHTTPSConnection(HTTPSConnection):\n \"\"\"\n Based on httplib.HTTPSConnection but wraps the socket with\n SSL certification.\n \"\"\"\n cert_reqs = None\n ca_certs = None\n ca_cert_dir = None\n ssl_version = None\n assert_fingerprint = None\n\n def set_cert(self, key_file=None, cert_file=None,\n cert_reqs=None, ca_certs=None,\n assert_hostname=None, assert_fingerprint=None,\n ca_cert_dir=None):\n \"\"\"\n This method should only be called once, before the connection is used.\n \"\"\"\n # If cert_reqs is not provided, we can try to guess. 
If the user gave\n # us a cert database, we assume they want to use it: otherwise, if\n # they gave us an SSL Context object we should use whatever is set for\n # it.\n if cert_reqs is None:\n if ca_certs or ca_cert_dir:\n cert_reqs = 'CERT_REQUIRED'\n elif self.ssl_context is not None:\n cert_reqs = self.ssl_context.verify_mode\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n self.ca_certs = ca_certs and os.path.expanduser(ca_certs)\n self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)\n\n def connect(self):\n # Add certificate verification\n conn = self._new_conn()\n\n hostname = self.host\n if getattr(self, '_tunnel_host', None):\n # _tunnel_host was added in Python 2.6.3\n # (See: http://hg.python.org/cpython/rev/0f57b30a152f)\n\n self.sock = conn\n # Calls self._set_hostport(), so self.host is\n # self._tunnel_host below.\n self._tunnel()\n # Mark this connection as not reusable\n self.auto_open = 0\n\n # Override the host with the one we're requesting data from.\n hostname = self._tunnel_host\n\n is_time_off = datetime.date.today() < RECENT_DATE\n if is_time_off:\n warnings.warn((\n 'System time is way off (before {0}). This will probably '\n 'lead to SSL verification errors').format(RECENT_DATE),\n SystemTimeWarning\n )\n\n # Wrap socket using verification with the root certs in\n # trusted_root_certs\n if self.ssl_context is None:\n self.ssl_context = create_urllib3_context(\n ssl_version=resolve_ssl_version(self.ssl_version),\n cert_reqs=resolve_cert_reqs(self.cert_reqs),\n )\n\n context = self.ssl_context\n context.verify_mode = resolve_cert_reqs(self.cert_reqs)\n self.sock = ssl_wrap_socket(\n sock=conn,\n keyfile=self.key_file,\n certfile=self.cert_file,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n server_hostname=hostname,\n ssl_context=context)\n\n if self.assert_fingerprint:\n assert_fingerprint(self.sock.getpeercert(binary_form=True),\n self.assert_fingerprint)\n elif context.verify_mode != ssl.CERT_NONE \\\n and not getattr(context, 'check_hostname', False) \\\n and self.assert_hostname is not False:\n # While urllib3 attempts to always turn off hostname matching from\n # the TLS library, this cannot always be done. So we check whether\n # the TLS Library still thinks it's matching hostnames.\n cert = self.sock.getpeercert()\n if not cert.get('subjectAltName', ()):\n warnings.warn((\n 'Certificate for {0} has no `subjectAltName`, falling back to check for a '\n '`commonName` for now. This feature is being removed by major browsers and '\n 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '\n 'for details.)'.format(hostname)),\n SubjectAltNameWarning\n )\n _match_hostname(cert, self.assert_hostname or hostname)\n\n self.is_verified = (\n context.verify_mode == ssl.CERT_REQUIRED or\n self.assert_fingerprint is not None\n )\n\n\ndef _match_hostname(cert, asserted_hostname):\n try:\n match_hostname(cert, asserted_hostname)\n except CertificateError as e:\n log.error(\n 'Certificate did not match expected hostname: %s. 
'\n 'Certificate: %s', asserted_hostname, cert\n )\n # Add cert to exception and reraise so client code can inspect\n # the cert when catching the exception, if they want to\n e._peer_cert = cert\n raise\n\n\nif ssl:\n # Make a copy for testing.\n UnverifiedHTTPSConnection = HTTPSConnection\n HTTPSConnection = VerifiedHTTPSConnection\nelse:\n HTTPSConnection = DummyConnection\n", "path": "urllib3/connection.py" } ]
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt index 4ef1684b97..22b4c04a48 100644 --- a/CONTRIBUTORS.txt +++ b/CONTRIBUTORS.txt @@ -246,5 +246,8 @@ In chronological order: * Akamai (through Jesse Shapiro) <[email protected]> * Ongoing maintenance +* Dominique Leuenberger <[email protected]> + * Minor fixes in the test suite + * [Your name or handle] <[email or website]> * [Brief summary of your changes] diff --git a/urllib3/connection.py b/urllib3/connection.py index 06bcbde1af..a03b573f01 100644 --- a/urllib3/connection.py +++ b/urllib3/connection.py @@ -56,10 +56,11 @@ class ConnectionError(Exception): 'https': 443, } -# When updating RECENT_DATE, move it to -# within two years of the current date, and no -# earlier than 6 months ago. -RECENT_DATE = datetime.date(2016, 1, 1) +# When updating RECENT_DATE, move it to within two years of the current date, +# and not less than 6 months ago. +# Example: if Today is 2018-01-01, then RECENT_DATE should be any date on or +# after 2016-01-01 (today - 2 years) AND before 2017-07-01 (today - 6 months) +RECENT_DATE = datetime.date(2017, 6, 30) class DummyConnection(object):
RECENT_DATE needs to be bumped Test suite started failing, as RECENT_DATE is not recent enough: ``` [ 62s] def test_recent_date(self): [ 62s] # This test is to make sure that the RECENT_DATE value [ 62s] # doesn't get too far behind what the current date is. [ 62s] # When this test fails update urllib3.connection.RECENT_DATE [ 62s] # according to the rules defined in that file. [ 62s] two_years = datetime.timedelta(days=365 * 2) [ 62s] > assert RECENT_DATE > (datetime.datetime.today() - two_years).date() [ 62s] E AssertionError: assert datetime.date(2016, 1, 1) > datetime.date(2016, 1, 1) [ 62s] E + where datetime.date(2016, 1, 1) = <built-in method date of datetime.datetime object at 0x7fb6899be198>() [ 62s] E + where <built-in method date of datetime.datetime object at 0x7fb6899be198> = (datetime.datetime(2017, 12, 31, 10, 16, 14, 472906) - datetime.timedelta(730)).date [ 62s] E + where datetime.datetime(2017, 12, 31, 10, 16, 14, 472906) = <built-in method today of type object at 0x7fb68e948d20>() [ 62s] E + where <built-in method today of type object at 0x7fb68e948d20> = <class 'datetime.datetime'>.today [ 62s] E + where <class 'datetime.datetime'> = datetime.datetime ```
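The recency rule from the updated comment can be checked with a small standalone sketch (my own illustration, not code from the repository; the 182-day approximation of six months is an assumption):

```python
import datetime

RECENT_DATE = datetime.date(2017, 6, 30)

def recent_date_is_valid(today=None):
    # Mirrors the failing test plus the comment's upper bound:
    # RECENT_DATE must be newer than (today - 2 years) and at least
    # roughly six months old (182 days is an approximation).
    today = today or datetime.date.today()
    two_years = datetime.timedelta(days=365 * 2)
    six_months = datetime.timedelta(days=182)
    return (today - two_years) < RECENT_DATE < (today - six_months)

# On 2017-12-31 (the date in the failing run) the old value 2016-01-01
# fails the lower bound, while 2017-06-30 passes:
assert recent_date_is_valid(datetime.date(2017, 12, 31))
```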
conan-io__conan-3284
[ { "content": "import os\nimport shutil\nfrom six.moves.urllib.parse import urlparse\n\nfrom conans.tools import unzip\nfrom conans.util.files import rmdir, mkdir\nfrom conans.client.remote_registry import RemoteRegistry\nfrom conans import tools\nfrom conans.errors import ConanException\nimport subprocess\n\n\ndef _hide_password(resource):\n \"\"\"\n Hide password from url/file path\n\n :param resource: string with url or file path\n :return: resource with hidden password if present\n \"\"\"\n password = urlparse(resource).password\n return resource.replace(password, \"<hidden>\") if password else resource\n\n\ndef _handle_remotes(registry_path, remote_file, output):\n registry = RemoteRegistry(registry_path, output)\n new_registry = RemoteRegistry(remote_file, output)\n registry.define_remotes(new_registry.remotes)\n\n\ndef _handle_profiles(source_folder, target_folder, output):\n mkdir(target_folder)\n for root, _, files in os.walk(source_folder):\n relative_path = os.path.relpath(root, source_folder)\n if relative_path == \".\":\n relative_path = \"\"\n for f in files:\n profile = os.path.join(relative_path, f)\n output.info(\" Installing profile %s\" % profile)\n shutil.copy(os.path.join(root, f), os.path.join(target_folder, profile))\n\n\ndef _process_git_repo(repo_url, client_cache, output, tmp_folder, verify_ssl):\n output.info(\"Trying to clone repo %s\" % repo_url)\n\n with tools.chdir(tmp_folder):\n try:\n subprocess.check_output('git -c http.sslVerify=%s clone \"%s\" config' % (verify_ssl, repo_url),\n shell=True)\n output.info(\"Repo cloned\")\n except Exception as e:\n raise ConanException(\"config install error. Can't clone repo: %s\" % str(e))\n\n tmp_folder = os.path.join(tmp_folder, \"config\")\n _process_folder(tmp_folder, client_cache, output)\n\n\ndef _process_zip_file(zippath, client_cache, output, tmp_folder, remove=False):\n unzip(zippath, tmp_folder)\n if remove:\n os.unlink(zippath)\n _process_folder(tmp_folder, client_cache, output)\n\n\ndef _handle_conan_conf(current_conan_conf, new_conan_conf_path):\n current_conan_conf.read(new_conan_conf_path)\n with open(current_conan_conf.filename, \"w\") as f:\n current_conan_conf.write(f)\n\n\ndef _process_folder(folder, client_cache, output):\n for root, dirs, files in os.walk(folder):\n for f in files:\n if f == \"settings.yml\":\n output.info(\"Installing settings.yml\")\n settings_path = client_cache.settings_path\n shutil.copy(os.path.join(root, f), settings_path)\n elif f == \"conan.conf\":\n output.info(\"Processing conan.conf\")\n conan_conf = client_cache.conan_config\n _handle_conan_conf(conan_conf, os.path.join(root, f))\n elif f == \"remotes.txt\":\n output.info(\"Defining remotes\")\n registry_path = client_cache.registry\n _handle_remotes(registry_path, os.path.join(root, f), output)\n else:\n relpath = os.path.relpath(root, folder)\n target_folder = os.path.join(client_cache.conan_folder, relpath)\n mkdir(target_folder)\n output.info(\"Copying file %s to %s\" % (f, target_folder))\n shutil.copy(os.path.join(root, f), target_folder)\n for d in dirs:\n if d == \"profiles\":\n output.info(\"Installing profiles\")\n profiles_path = client_cache.profiles_path\n _handle_profiles(os.path.join(root, d), profiles_path, output)\n break\n dirs[:] = [d for d in dirs if d not in (\"profiles\", \".git\")]\n\n\ndef _process_download(item, client_cache, output, tmp_folder, verify_ssl):\n output.info(\"Trying to download %s\" % _hide_password(item))\n zippath = os.path.join(tmp_folder, \"config.zip\")\n try:\n 
tools.download(item, zippath, out=output, verify=verify_ssl)\n _process_zip_file(zippath, client_cache, output, tmp_folder, remove=True)\n except Exception as e:\n raise ConanException(\"Error while installing config from %s\\n%s\" % (item, str(e)))\n\n\ndef configuration_install(item, client_cache, output, verify_ssl, config_type=None):\n tmp_folder = os.path.join(client_cache.conan_folder, \"tmp_config_install\")\n # necessary for Mac OSX, where the temp folders in /var/ are symlinks to /private/var/\n tmp_folder = os.path.realpath(tmp_folder)\n mkdir(tmp_folder)\n try:\n if item is None:\n try:\n item = client_cache.conan_config.get_item(\"general.config_install\")\n except ConanException:\n raise ConanException(\"Called config install without arguments and \"\n \"'general.config_install' not defined in conan.conf\")\n\n if item.endswith(\".git\") or config_type == \"git\":\n _process_git_repo(item, client_cache, output, tmp_folder, verify_ssl)\n elif os.path.exists(item):\n # is a local file\n _process_zip_file(item, client_cache, output, tmp_folder)\n elif item.startswith(\"http\"):\n _process_download(item, client_cache, output, tmp_folder, verify_ssl)\n else:\n raise ConanException(\"I don't know how to process %s\" % item)\n finally:\n if item:\n client_cache.conan_config.set_item(\"general.config_install\", item)\n rmdir(tmp_folder)\n", "path": "conans/client/conf/config_installer.py" } ]
[ { "content": "import os\nimport shutil\nfrom six.moves.urllib.parse import urlparse\n\nfrom conans.tools import unzip\nfrom conans.util.files import rmdir, mkdir\nfrom conans.client.remote_registry import RemoteRegistry\nfrom conans import tools\nfrom conans.errors import ConanException\nimport subprocess\n\n\ndef _hide_password(resource):\n \"\"\"\n Hide password from url/file path\n\n :param resource: string with url or file path\n :return: resource with hidden password if present\n \"\"\"\n password = urlparse(resource).password\n return resource.replace(password, \"<hidden>\") if password else resource\n\n\ndef _handle_remotes(registry_path, remote_file, output):\n registry = RemoteRegistry(registry_path, output)\n new_registry = RemoteRegistry(remote_file, output)\n registry.define_remotes(new_registry.remotes)\n\n\ndef _handle_profiles(source_folder, target_folder, output):\n mkdir(target_folder)\n for root, _, files in os.walk(source_folder):\n relative_path = os.path.relpath(root, source_folder)\n if relative_path == \".\":\n relative_path = \"\"\n for f in files:\n profile = os.path.join(relative_path, f)\n output.info(\" Installing profile %s\" % profile)\n shutil.copy(os.path.join(root, f), os.path.join(target_folder, profile))\n\n\ndef _process_git_repo(repo_url, client_cache, output, tmp_folder, verify_ssl):\n output.info(\"Trying to clone repo %s\" % repo_url)\n\n with tools.chdir(tmp_folder):\n try:\n subprocess.check_output('git -c http.sslVerify=%s -c init.templateDir= clone \"%s\" config' % (verify_ssl, repo_url),\n shell=True)\n output.info(\"Repo cloned\")\n except Exception as e:\n raise ConanException(\"config install error. Can't clone repo: %s\" % str(e))\n\n tmp_folder = os.path.join(tmp_folder, \"config\")\n _process_folder(tmp_folder, client_cache, output)\n\n\ndef _process_zip_file(zippath, client_cache, output, tmp_folder, remove=False):\n unzip(zippath, tmp_folder)\n if remove:\n os.unlink(zippath)\n _process_folder(tmp_folder, client_cache, output)\n\n\ndef _handle_conan_conf(current_conan_conf, new_conan_conf_path):\n current_conan_conf.read(new_conan_conf_path)\n with open(current_conan_conf.filename, \"w\") as f:\n current_conan_conf.write(f)\n\n\ndef _process_folder(folder, client_cache, output):\n for root, dirs, files in os.walk(folder):\n for f in files:\n if f == \"settings.yml\":\n output.info(\"Installing settings.yml\")\n settings_path = client_cache.settings_path\n shutil.copy(os.path.join(root, f), settings_path)\n elif f == \"conan.conf\":\n output.info(\"Processing conan.conf\")\n conan_conf = client_cache.conan_config\n _handle_conan_conf(conan_conf, os.path.join(root, f))\n elif f == \"remotes.txt\":\n output.info(\"Defining remotes\")\n registry_path = client_cache.registry\n _handle_remotes(registry_path, os.path.join(root, f), output)\n else:\n relpath = os.path.relpath(root, folder)\n target_folder = os.path.join(client_cache.conan_folder, relpath)\n mkdir(target_folder)\n output.info(\"Copying file %s to %s\" % (f, target_folder))\n shutil.copy(os.path.join(root, f), target_folder)\n for d in dirs:\n if d == \"profiles\":\n output.info(\"Installing profiles\")\n profiles_path = client_cache.profiles_path\n _handle_profiles(os.path.join(root, d), profiles_path, output)\n break\n dirs[:] = [d for d in dirs if d not in (\"profiles\", \".git\")]\n\n\ndef _process_download(item, client_cache, output, tmp_folder, verify_ssl):\n output.info(\"Trying to download %s\" % _hide_password(item))\n zippath = os.path.join(tmp_folder, 
\"config.zip\")\n try:\n tools.download(item, zippath, out=output, verify=verify_ssl)\n _process_zip_file(zippath, client_cache, output, tmp_folder, remove=True)\n except Exception as e:\n raise ConanException(\"Error while installing config from %s\\n%s\" % (item, str(e)))\n\n\ndef configuration_install(item, client_cache, output, verify_ssl, config_type=None):\n tmp_folder = os.path.join(client_cache.conan_folder, \"tmp_config_install\")\n # necessary for Mac OSX, where the temp folders in /var/ are symlinks to /private/var/\n tmp_folder = os.path.realpath(tmp_folder)\n mkdir(tmp_folder)\n try:\n if item is None:\n try:\n item = client_cache.conan_config.get_item(\"general.config_install\")\n except ConanException:\n raise ConanException(\"Called config install without arguments and \"\n \"'general.config_install' not defined in conan.conf\")\n\n if item.endswith(\".git\") or config_type == \"git\":\n _process_git_repo(item, client_cache, output, tmp_folder, verify_ssl)\n elif os.path.exists(item):\n # is a local file\n _process_zip_file(item, client_cache, output, tmp_folder)\n elif item.startswith(\"http\"):\n _process_download(item, client_cache, output, tmp_folder, verify_ssl)\n else:\n raise ConanException(\"I don't know how to process %s\" % item)\n finally:\n if item:\n client_cache.conan_config.set_item(\"general.config_install\", item)\n rmdir(tmp_folder)\n", "path": "conans/client/conf/config_installer.py" } ]
diff --git a/conans/client/conf/config_installer.py b/conans/client/conf/config_installer.py index 73af07c8789..04aa640e820 100644 --- a/conans/client/conf/config_installer.py +++ b/conans/client/conf/config_installer.py @@ -44,7 +44,7 @@ def _process_git_repo(repo_url, client_cache, output, tmp_folder, verify_ssl): with tools.chdir(tmp_folder): try: - subprocess.check_output('git -c http.sslVerify=%s clone "%s" config' % (verify_ssl, repo_url), + subprocess.check_output('git -c http.sslVerify=%s -c init.templateDir= clone "%s" config' % (verify_ssl, repo_url), shell=True) output.info("Repo cloned") except Exception as e:
Consider turning off template directories in Git for conan config install To help us debug your issue please explain: - [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md). - [x] I've specified the Conan version, operating system version and any tool that can be relevant. - [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion. This is with Conan 1.6.0 on Windows 10. To see this in progress, have a Git installation set up with automatic ctags generation akin to what's described in [Effortless Ctags with Git](https://tbaggery.com/2011/08/08/effortless-ctags-with-git.html). When doing `conan config install` with a Git URL, I get error messages like this: ``` Traceback (most recent call last): File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 391, in _rmtree_unsafe os.rmdir(path) OSError: [WinError 145] The directory is not empty: 'C:\\Users\\kam\\.conan\\tmp_config_install\\config\\.git\\hooks' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\command.py", line 1230, in run method(args[0][1:]) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\command.py", line 379, in config return self._conan.config_install(args.item, verify_ssl, args.type) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\conan_api.py", line 79, in wrapper return f(*args, **kwargs) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\conan_api.py", line 510, in config_install return configuration_install(item, self._client_cache, self._user_io.out, verify_ssl, config_type) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\client\conf\config_installer.py", line 135, in configuration_install rmdir(tmp_folder) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\util\files.py", line 202, in rmdir shutil.rmtree(path, onerror=_change_permissions) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 494, in rmtree return _rmtree_unsafe(path, onerror) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 384, in _rmtree_unsafe _rmtree_unsafe(fullname, onerror) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 384, in _rmtree_unsafe _rmtree_unsafe(fullname, onerror) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 384, in _rmtree_unsafe _rmtree_unsafe(fullname, onerror) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\shutil.py", line 393, in _rmtree_unsafe onerror(os.rmdir, path, sys.exc_info()) File "c:\users\kam\.virtualenvs\skel-hv6zqtfh\lib\site-packages\conans\util\files.py", line 197, in _change_permissions raise OSError("Cannot change permissions for {}! Exception info: {}".format(path, exc_info)) OSError: Cannot change permissions for C:\Users\kam\.conan\tmp_config_install\config\.git\hooks! Exception info: (<class 'OSError'>, OSError(41, 'The directory is not empty'), <traceback object at 0x0000016409078548>) ERROR: Cannot change permissions for C:\Users\kam\.conan\tmp_config_install\config\.git\hooks! Exception info: (<class 'OSError'>, OSError(41, 'The directory is not empty'), <traceback object at 0x0000016409078548>) ``` The problem boils down to the way files are locked on Windows. It's not possible to delete open files. 
But the hook starts a background process which keeps the script files open, and the cleanup of the directory fails. Of course, a second later, the directory can be deleted once the files are closed and unlocked. I've since started reworking my Git template not to start background processes by default, and to only run `ctags` in checkouts that I'm actually developing on. This fixes my problem with `conan config install`. It may make sense, though, to add `-c init.templateDir=` to the `git` command line when working on temporary downloads. It's part of sanitizing the process against user configuration: in a hook, _anything_ can happen, and the errors are obscure and difficult to understand. `conan config install` is mostly treating a Git repository as a sophisticated form of file archive, and probably doesn't want the user's hooks or other unusual setup imported from the template.
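A minimal sketch of the hardened clone (my own illustration; it passes an argument list instead of `shell=True`, which differs from the snippet in the patch, and lower-cases the boolean for git):

```python
import subprocess

def clone_config_repo(repo_url, verify_ssl=True):
    # init.templateDir= (empty) stops git from copying the user's hook
    # templates into the temporary clone, so no background hook can hold
    # files open while conan removes the folder afterwards.
    subprocess.check_output([
        "git",
        "-c", "http.sslVerify=%s" % ("true" if verify_ssl else "false"),
        "-c", "init.templateDir=",
        "clone", repo_url, "config",
    ])
```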
googleapis__google-cloud-python-1481
[ { "content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Topics.\"\"\"\n\nimport base64\n\nfrom gcloud._helpers import _rfc3339_to_datetime\n\n\nclass Message(object):\n \"\"\"Messages can be published to a topic and received by subscribers.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1/PubsubMessage\n\n :type data: bytes\n :param data: the payload of the message\n\n :type message_id: string\n :param message_id: An ID assigned to the message by the API.\n\n :type attributes: dict or None\n :param attributes: Extra metadata associated by the publisher with the\n message.\n \"\"\"\n def __init__(self, data, message_id, attributes=None):\n self.data = data\n self.message_id = message_id\n self._attributes = attributes\n\n @property\n def attributes(self):\n \"\"\"Lazily-constructed attribute dictionary\"\"\"\n if self._attributes is None:\n self._attributes = {}\n return self._attributes\n\n @property\n def timestamp(self):\n \"\"\"Return sortable timestamp from attributes, if passed.\n\n Allows sorting messages in publication order (assuming consistent\n clocks across all publishers).\n\n :rtype: :class:`datetime.datetime`\n :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp\n :raises: ValueError if timestamp not in ``attributes``, or if it does\n not match the RFC 3339 format.\n \"\"\"\n stamp = self.attributes.get('timestamp')\n if stamp is None:\n raise ValueError('No timestamp')\n return _rfc3339_to_datetime(stamp)\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n \"\"\"\n data = base64.b64decode(api_repr['data'])\n return cls(data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n", "path": "gcloud/pubsub/message.py" } ]
[ { "content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Topics.\"\"\"\n\nimport base64\n\nfrom gcloud._helpers import _rfc3339_to_datetime\n\n\nclass Message(object):\n \"\"\"Messages can be published to a topic and received by subscribers.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1/PubsubMessage\n\n :type data: bytes\n :param data: the payload of the message\n\n :type message_id: string\n :param message_id: An ID assigned to the message by the API.\n\n :type attributes: dict or None\n :param attributes: Extra metadata associated by the publisher with the\n message.\n \"\"\"\n def __init__(self, data, message_id, attributes=None):\n self.data = data\n self.message_id = message_id\n self._attributes = attributes\n\n @property\n def attributes(self):\n \"\"\"Lazily-constructed attribute dictionary\"\"\"\n if self._attributes is None:\n self._attributes = {}\n return self._attributes\n\n @property\n def timestamp(self):\n \"\"\"Return sortable timestamp from attributes, if passed.\n\n Allows sorting messages in publication order (assuming consistent\n clocks across all publishers).\n\n :rtype: :class:`datetime.datetime`\n :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp\n :raises: ValueError if timestamp not in ``attributes``, or if it does\n not match the RFC 3339 format.\n \"\"\"\n stamp = self.attributes.get('timestamp')\n if stamp is None:\n raise ValueError('No timestamp')\n return _rfc3339_to_datetime(stamp)\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n \"\"\"\n data = base64.b64decode(api_repr.get('data', b''))\n return cls(data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n", "path": "gcloud/pubsub/message.py" } ]
diff --git a/gcloud/pubsub/message.py b/gcloud/pubsub/message.py index e8db230aa7b6..832a5d554c44 100644 --- a/gcloud/pubsub/message.py +++ b/gcloud/pubsub/message.py @@ -71,6 +71,6 @@ def from_api_repr(cls, api_repr): :type api_repr: dict or None :param api_repr: The API representation of the message """ - data = base64.b64decode(api_repr['data']) + data = base64.b64decode(api_repr.get('data', b'')) return cls(data=data, message_id=api_repr['messageId'], attributes=api_repr.get('attributes')) diff --git a/gcloud/pubsub/test_message.py b/gcloud/pubsub/test_message.py index 693f34405289..7bdc245ea2a9 100644 --- a/gcloud/pubsub/test_message.py +++ b/gcloud/pubsub/test_message.py @@ -42,31 +42,6 @@ def test_ctor_w_attributes(self): self.assertEqual(message.message_id, MESSAGE_ID) self.assertEqual(message.attributes, ATTRS) - def test_from_api_repr_no_attributes(self): - from base64 import b64encode as b64 - DATA = b'DEADBEEF' - B64_DATA = b64(DATA) - MESSAGE_ID = '12345' - api_repr = {'data': B64_DATA, 'messageId': MESSAGE_ID} - message = self._getTargetClass().from_api_repr(api_repr) - self.assertEqual(message.data, DATA) - self.assertEqual(message.message_id, MESSAGE_ID) - self.assertEqual(message.attributes, {}) - - def test_from_api_repr_w_attributes(self): - from base64 import b64encode as b64 - DATA = b'DEADBEEF' - B64_DATA = b64(DATA) - MESSAGE_ID = '12345' - ATTRS = {'a': 'b'} - api_repr = {'data': B64_DATA, - 'messageId': MESSAGE_ID, - 'attributes': ATTRS} - message = self._getTargetClass().from_api_repr(api_repr) - self.assertEqual(message.data, DATA) - self.assertEqual(message.message_id, MESSAGE_ID) - self.assertEqual(message.attributes, ATTRS) - def test_timestamp_no_attributes(self): DATA = b'DEADBEEF' MESSAGE_ID = b'12345' @@ -102,3 +77,36 @@ def test_timestamp_w_timestamp_in_attributes(self): message = self._makeOne(data=DATA, message_id=MESSAGE_ID, attributes=ATTRS) self.assertEqual(message.timestamp, timestamp) + + def test_from_api_repr_missing_data(self): + MESSAGE_ID = '12345' + api_repr = {'messageId': MESSAGE_ID} + message = self._getTargetClass().from_api_repr(api_repr) + self.assertEqual(message.data, b'') + self.assertEqual(message.message_id, MESSAGE_ID) + self.assertEqual(message.attributes, {}) + + def test_from_api_repr_no_attributes(self): + from base64 import b64encode as b64 + DATA = b'DEADBEEF' + B64_DATA = b64(DATA) + MESSAGE_ID = '12345' + api_repr = {'data': B64_DATA, 'messageId': MESSAGE_ID} + message = self._getTargetClass().from_api_repr(api_repr) + self.assertEqual(message.data, DATA) + self.assertEqual(message.message_id, MESSAGE_ID) + self.assertEqual(message.attributes, {}) + + def test_from_api_repr_w_attributes(self): + from base64 import b64encode as b64 + DATA = b'DEADBEEF' + B64_DATA = b64(DATA) + MESSAGE_ID = '12345' + ATTRS = {'a': 'b'} + api_repr = {'data': B64_DATA, + 'messageId': MESSAGE_ID, + 'attributes': ATTRS} + message = self._getTargetClass().from_api_repr(api_repr) + self.assertEqual(message.data, DATA) + self.assertEqual(message.message_id, MESSAGE_ID) + self.assertEqual(message.attributes, ATTRS)
pubsub fails if data key is not present If a message is published with a zero-length string (`topic.publish('', url=url, title=title)`), then when the message is received there is no `data` field in the message, and a `KeyError` is raised when transforming the message from the Pub/Sub API representation.

https://github.com/GoogleCloudPlatform/gcloud-python/blob/master/gcloud/pubsub/message.py#L74

```
Traceback (most recent call last):
  File "/en_notifications/en_notifications.py", line 51, in <module>
    received = PS_SUBSCRIPTION.pull(max_messages=PULL_COUNT)
  File "/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/subscription.py", line 212, in pull
    for info in response.get('receivedMessages', ())]
  File "/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/message.py", line 74, in from_api_repr
    data = base64.b64decode(api_repr['data'])
KeyError: 'data'
```
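The defensive decode from the patch can be exercised in isolation (a sketch with a hypothetical free-standing helper, not the library's API):

```python
import base64

def message_data(api_repr):
    # The API omits 'data' entirely for zero-length payloads, so fall
    # back to an empty byte string before base64-decoding.
    return base64.b64decode(api_repr.get('data', b''))

assert message_data({'messageId': '12345'}) == b''               # no 'data' key
assert message_data({'data': base64.b64encode(b'hi')}) == b'hi'  # round-trip
```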
open-telemetry__opentelemetry-python-2093
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nOpenTelemetry SDK Configurator for Easy Instrumentation with Distros\n\"\"\"\n\nfrom os import environ\nfrom typing import Sequence, Tuple\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry import trace\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_ID_GENERATOR,\n OTEL_TRACES_EXPORTER,\n)\nfrom opentelemetry.instrumentation.configurator import BaseConfigurator\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter\nfrom opentelemetry.sdk.trace.id_generator import IdGenerator\n\n_EXPORTER_OTLP = \"otlp\"\n_EXPORTER_OTLP_SPAN = \"otlp_proto_grpc_span\"\n\n_RANDOM_ID_GENERATOR = \"random\"\n_DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR\n\n\ndef _get_id_generator() -> str:\n return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)\n\n\ndef _get_exporter_names() -> Sequence[str]:\n trace_exporters = environ.get(OTEL_TRACES_EXPORTER)\n\n exporters = set()\n\n if trace_exporters and trace_exporters.lower().strip() != \"none\":\n exporters.update(\n {\n trace_exporter.strip()\n for trace_exporter in trace_exporters.split(\",\")\n }\n )\n\n if _EXPORTER_OTLP in exporters:\n exporters.remove(_EXPORTER_OTLP)\n exporters.add(_EXPORTER_OTLP_SPAN)\n\n return list(exporters)\n\n\ndef _init_tracing(\n exporters: Sequence[SpanExporter], id_generator: IdGenerator\n):\n # if env var OTEL_RESOURCE_ATTRIBUTES is given, it will read the service_name\n # from the env variable else defaults to \"unknown_service\"\n provider = TracerProvider(\n id_generator=id_generator(),\n )\n trace.set_tracer_provider(provider)\n\n for _, exporter_class in exporters.items():\n exporter_args = {}\n provider.add_span_processor(\n BatchSpanProcessor(exporter_class(**exporter_args))\n )\n\n\ndef _import_tracer_provider_config_components(\n selected_components, entry_point_name\n) -> Sequence[Tuple[str, object]]:\n component_entry_points = {\n ep.name: ep for ep in iter_entry_points(entry_point_name)\n }\n component_impls = []\n for selected_component in selected_components:\n entry_point = component_entry_points.get(selected_component, None)\n if not entry_point:\n raise RuntimeError(\n f\"Requested component '{selected_component}' not found in entry points for '{entry_point_name}'\"\n )\n\n component_impl = entry_point.load()\n component_impls.append((selected_component, component_impl))\n\n return component_impls\n\n\ndef _import_exporters(\n exporter_names: Sequence[str],\n) -> Sequence[SpanExporter]:\n trace_exporters = {}\n\n for (\n exporter_name,\n exporter_impl,\n ) in _import_tracer_provider_config_components(\n exporter_names, \"opentelemetry_exporter\"\n ):\n if issubclass(exporter_impl, SpanExporter):\n trace_exporters[exporter_name] = exporter_impl\n else:\n raise RuntimeError(f\"{exporter_name} is not a trace exporter\")\n return trace_exporters\n\n\ndef 
_import_id_generator(id_generator_name: str) -> IdGenerator:\n # pylint: disable=unbalanced-tuple-unpacking\n [\n (id_generator_name, id_generator_impl)\n ] = _import_tracer_provider_config_components(\n [id_generator_name.strip()], \"opentelemetry_id_generator\"\n )\n\n if issubclass(id_generator_impl, IdGenerator):\n return id_generator_impl\n\n raise RuntimeError(f\"{id_generator_name} is not an IdGenerator\")\n\n\ndef _initialize_components():\n exporter_names = _get_exporter_names()\n trace_exporters = _import_exporters(exporter_names)\n id_generator_name = _get_id_generator()\n id_generator = _import_id_generator(id_generator_name)\n _init_tracing(trace_exporters, id_generator)\n\n\nclass _OTelSDKConfigurator(BaseConfigurator):\n \"\"\"A basic Configurator by OTel Python for initializing OTel SDK components\n\n Initializes several crucial OTel SDK components (i.e. TracerProvider,\n MeterProvider, Processors...) according to a default implementation. Other\n Configurators can subclass and slightly alter this initialization.\n\n NOTE: This class should not be instantiated nor should it become an entry\n point on the `opentelemetry-sdk` package. Instead, distros should subclass\n this Configurator and enhance it as needed.\n \"\"\"\n\n def _configure(self, **kwargs):\n _initialize_components()\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py" } ]
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nOpenTelemetry SDK Configurator for Easy Instrumentation with Distros\n\"\"\"\n\nfrom os import environ\nfrom typing import Sequence, Tuple\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry import trace\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_ID_GENERATOR,\n OTEL_TRACES_EXPORTER,\n)\nfrom opentelemetry.instrumentation.configurator import BaseConfigurator\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter\nfrom opentelemetry.sdk.trace.id_generator import IdGenerator\n\n_EXPORTER_OTLP = \"otlp\"\n_EXPORTER_OTLP_SPAN = \"otlp_proto_grpc_span\"\n\n_RANDOM_ID_GENERATOR = \"random\"\n_DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR\n\n\ndef _get_id_generator() -> str:\n return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)\n\n\ndef _get_exporter_names() -> Sequence[str]:\n trace_exporters = environ.get(OTEL_TRACES_EXPORTER)\n\n exporters = set()\n\n if trace_exporters and trace_exporters.lower().strip() != \"none\":\n exporters.update(\n {\n trace_exporter.strip()\n for trace_exporter in trace_exporters.split(\",\")\n }\n )\n\n if _EXPORTER_OTLP in exporters:\n exporters.remove(_EXPORTER_OTLP)\n exporters.add(_EXPORTER_OTLP_SPAN)\n\n return list(exporters)\n\n\ndef _init_tracing(\n exporters: Sequence[SpanExporter], id_generator: IdGenerator\n):\n # if env var OTEL_RESOURCE_ATTRIBUTES is given, it will read the service_name\n # from the env variable else defaults to \"unknown_service\"\n provider = TracerProvider(\n id_generator=id_generator(),\n )\n trace.set_tracer_provider(provider)\n\n for _, exporter_class in exporters.items():\n exporter_args = {}\n provider.add_span_processor(\n BatchSpanProcessor(exporter_class(**exporter_args))\n )\n\n\ndef _import_tracer_provider_config_components(\n selected_components, entry_point_name\n) -> Sequence[Tuple[str, object]]:\n component_entry_points = {\n ep.name: ep for ep in iter_entry_points(entry_point_name)\n }\n component_impls = []\n for selected_component in selected_components:\n entry_point = component_entry_points.get(selected_component, None)\n if not entry_point:\n raise RuntimeError(\n f\"Requested component '{selected_component}' not found in entry points for '{entry_point_name}'\"\n )\n\n component_impl = entry_point.load()\n component_impls.append((selected_component, component_impl))\n\n return component_impls\n\n\ndef _import_exporters(\n exporter_names: Sequence[str],\n) -> Sequence[SpanExporter]:\n trace_exporters = {}\n\n for (\n exporter_name,\n exporter_impl,\n ) in _import_tracer_provider_config_components(\n exporter_names, \"opentelemetry_traces_exporter\"\n ):\n if issubclass(exporter_impl, SpanExporter):\n trace_exporters[exporter_name] = exporter_impl\n else:\n raise RuntimeError(f\"{exporter_name} is not a trace exporter\")\n return trace_exporters\n\n\ndef 
_import_id_generator(id_generator_name: str) -> IdGenerator:\n # pylint: disable=unbalanced-tuple-unpacking\n [\n (id_generator_name, id_generator_impl)\n ] = _import_tracer_provider_config_components(\n [id_generator_name.strip()], \"opentelemetry_id_generator\"\n )\n\n if issubclass(id_generator_impl, IdGenerator):\n return id_generator_impl\n\n raise RuntimeError(f\"{id_generator_name} is not an IdGenerator\")\n\n\ndef _initialize_components():\n exporter_names = _get_exporter_names()\n trace_exporters = _import_exporters(exporter_names)\n id_generator_name = _get_id_generator()\n id_generator = _import_id_generator(id_generator_name)\n _init_tracing(trace_exporters, id_generator)\n\n\nclass _OTelSDKConfigurator(BaseConfigurator):\n \"\"\"A basic Configurator by OTel Python for initializing OTel SDK components\n\n Initializes several crucial OTel SDK components (i.e. TracerProvider,\n MeterProvider, Processors...) according to a default implementation. Other\n Configurators can subclass and slightly alter this initialization.\n\n NOTE: This class should not be instantiated nor should it become an entry\n point on the `opentelemetry-sdk` package. Instead, distros should subclass\n this Configurator and enhance it as needed.\n \"\"\"\n\n def _configure(self, **kwargs):\n _initialize_components()\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 241c8252d5d..4e07232d068 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#2096](https://github.com/open-telemetry/opentelemetry-python/pull/2096)) - Fix propagation bug caused by counting skipped entries ([#2071](https://github.com/open-telemetry/opentelemetry-python/pull/2071)) +- Add entry point for exporters with default protocol + ([#2093](https://github.com/open-telemetry/opentelemetry-python/pull/2093)) - Do not skip sequence attribute on decode error ([#2097](https://github.com/open-telemetry/opentelemetry-python/pull/2097)) - `opentelemetry-test`: Add `HttpTestBase` to allow tests with actual TCP sockets diff --git a/exporter/opentelemetry-exporter-jaeger-proto-grpc/setup.cfg b/exporter/opentelemetry-exporter-jaeger-proto-grpc/setup.cfg index bd792429899..4f4228f9c76 100644 --- a/exporter/opentelemetry-exporter-jaeger-proto-grpc/setup.cfg +++ b/exporter/opentelemetry-exporter-jaeger-proto-grpc/setup.cfg @@ -52,5 +52,5 @@ where = src test = [options.entry_points] -opentelemetry_exporter = +opentelemetry_traces_exporter = jaeger_proto = opentelemetry.exporter.jaeger.proto.grpc:JaegerExporter diff --git a/exporter/opentelemetry-exporter-jaeger-thrift/setup.cfg b/exporter/opentelemetry-exporter-jaeger-thrift/setup.cfg index 945150d6879..e2c98a48096 100644 --- a/exporter/opentelemetry-exporter-jaeger-thrift/setup.cfg +++ b/exporter/opentelemetry-exporter-jaeger-thrift/setup.cfg @@ -51,5 +51,5 @@ where = src test = [options.entry_points] -opentelemetry_exporter = - jaeger_thrift = opentelemetry.exporter.jaeger.thrift:JaegerExporter \ No newline at end of file +opentelemetry_traces_exporter = + jaeger_thrift = opentelemetry.exporter.jaeger.thrift:JaegerExporter diff --git a/exporter/opentelemetry-exporter-jaeger/setup.cfg b/exporter/opentelemetry-exporter-jaeger/setup.cfg index 06e68fded5c..e882a65f975 100644 --- a/exporter/opentelemetry-exporter-jaeger/setup.cfg +++ b/exporter/opentelemetry-exporter-jaeger/setup.cfg @@ -44,3 +44,7 @@ install_requires = [options.extras_require] test = + +[options.entry_points] +opentelemetry_traces_exporter = + jaeger = opentelemetry.exporter.jaeger.proto.grpc:JaegerExporter diff --git a/exporter/opentelemetry-exporter-opencensus/setup.cfg b/exporter/opentelemetry-exporter-opencensus/setup.cfg index 03e82b70f37..1e10bc7b968 100644 --- a/exporter/opentelemetry-exporter-opencensus/setup.cfg +++ b/exporter/opentelemetry-exporter-opencensus/setup.cfg @@ -53,5 +53,5 @@ where = src test = [options.entry_points] -opentelemetry_exporter = - opencensus = opentelemetry.exporter.opencensus.trace_exporter:OpenCensusSpanExporter \ No newline at end of file +opentelemetry_traces_exporter = + opencensus = opentelemetry.exporter.opencensus.trace_exporter:OpenCensusSpanExporter diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/setup.cfg b/exporter/opentelemetry-exporter-otlp-proto-grpc/setup.cfg index 7013425c958..5d144190d35 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-grpc/setup.cfg +++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/setup.cfg @@ -54,5 +54,5 @@ test = where = src [options.entry_points] -opentelemetry_exporter = - otlp_proto_grpc_span = opentelemetry.exporter.otlp.proto.grpc.trace_exporter:OTLPSpanExporter +opentelemetry_traces_exporter = + otlp_proto_grpc = opentelemetry.exporter.otlp.proto.grpc.trace_exporter:OTLPSpanExporter diff --git 
a/exporter/opentelemetry-exporter-otlp-proto-http/setup.cfg b/exporter/opentelemetry-exporter-otlp-proto-http/setup.cfg index 5f6d102d120..38b82f42656 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-http/setup.cfg +++ b/exporter/opentelemetry-exporter-otlp-proto-http/setup.cfg @@ -53,5 +53,5 @@ test = where = src [options.entry_points] -opentelemetry_exporter = - otlp_proto_http_span = opentelemetry.exporter.otlp.proto.http.trace_exporter:OTLPSpanExporter +opentelemetry_traces_exporter = + otlp_proto_http = opentelemetry.exporter.otlp.proto.http.trace_exporter:OTLPSpanExporter diff --git a/exporter/opentelemetry-exporter-otlp/setup.cfg b/exporter/opentelemetry-exporter-otlp/setup.cfg index 8d24fca2429..b971015497d 100644 --- a/exporter/opentelemetry-exporter-otlp/setup.cfg +++ b/exporter/opentelemetry-exporter-otlp/setup.cfg @@ -39,3 +39,7 @@ python_requires = >=3.6 packages=find_namespace: install_requires = opentelemetry-exporter-otlp-proto-grpc == 1.5.0 + +[options.entry_points] +opentelemetry_traces_exporter = + otlp = opentelemetry.exporter.otlp.proto.grpc.trace_exporter:OTLPSpanExporter diff --git a/exporter/opentelemetry-exporter-zipkin-json/setup.cfg b/exporter/opentelemetry-exporter-zipkin-json/setup.cfg index 81fcbfa261c..2517b523261 100644 --- a/exporter/opentelemetry-exporter-zipkin-json/setup.cfg +++ b/exporter/opentelemetry-exporter-zipkin-json/setup.cfg @@ -52,5 +52,5 @@ where = src test = [options.entry_points] -opentelemetry_exporter = - zipkin_json = opentelemetry.exporter.zipkin.json:ZipkinExporter \ No newline at end of file +opentelemetry_traces_exporter = + zipkin_json = opentelemetry.exporter.zipkin.json:ZipkinExporter diff --git a/exporter/opentelemetry-exporter-zipkin-proto-http/setup.cfg b/exporter/opentelemetry-exporter-zipkin-proto-http/setup.cfg index 874ce5a368e..a6cdd5b5f87 100644 --- a/exporter/opentelemetry-exporter-zipkin-proto-http/setup.cfg +++ b/exporter/opentelemetry-exporter-zipkin-proto-http/setup.cfg @@ -54,5 +54,5 @@ where = src test = [options.entry_points] -opentelemetry_exporter = - zipkin_proto = opentelemetry.exporter.zipkin.proto.http:ZipkinExporter \ No newline at end of file +opentelemetry_traces_exporter = + zipkin_proto = opentelemetry.exporter.zipkin.proto.http:ZipkinExporter diff --git a/exporter/opentelemetry-exporter-zipkin/setup.cfg b/exporter/opentelemetry-exporter-zipkin/setup.cfg index bea3c1043d1..70e850bfe79 100644 --- a/exporter/opentelemetry-exporter-zipkin/setup.cfg +++ b/exporter/opentelemetry-exporter-zipkin/setup.cfg @@ -43,3 +43,7 @@ install_requires = [options.extras_require] test = + +[options.entry_points] +opentelemetry_traces_exporter = + zipkin = opentelemetry.exporter.zipkin.proto.http:ZipkinExporter diff --git a/opentelemetry-sdk/setup.cfg b/opentelemetry-sdk/setup.cfg index 160fde86e51..0c699262c64 100644 --- a/opentelemetry-sdk/setup.cfg +++ b/opentelemetry-sdk/setup.cfg @@ -52,8 +52,8 @@ where = src [options.entry_points] opentelemetry_tracer_provider = sdk_tracer_provider = opentelemetry.sdk.trace:TracerProvider -opentelemetry_exporter = - console_span = opentelemetry.sdk.trace.export:ConsoleSpanExporter +opentelemetry_traces_exporter = + console = opentelemetry.sdk.trace.export:ConsoleSpanExporter opentelemetry_id_generator = random = opentelemetry.sdk.trace.id_generator:RandomIdGenerator diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py index 78bb1377801..79445a7a357 100644 --- 
a/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py @@ -109,7 +109,7 @@ def _import_exporters( exporter_name, exporter_impl, ) in _import_tracer_provider_config_components( - exporter_names, "opentelemetry_exporter" + exporter_names, "opentelemetry_traces_exporter" ): if issubclass(exporter_impl, SpanExporter): trace_exporters[exporter_name] = exporter_impl
RuntimeError: Requested component 'jaeger' not found in entry points for 'opentelemetry_exporter'

From the exporter selection section of the [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection):

> Known values for OTEL_TRACES_EXPORTER are:
>
> - "otlp": OTLP
> - "jaeger": Jaeger gRPC
> - "zipkin": Zipkin (Defaults to protobuf format)
> - "none": No automatically configured exporter for traces.

We have split up the exporters based on protocol + serialisation to avoid taking on unnecessary dependencies, so there is no entry point for `jaeger`. If someone reads the spec and follows that recommendation, they will run into this error. We should either add a `jaeger` entry point or an alias that solves this problem.
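To make the failure mode concrete, here is a minimal, self-contained sketch of how names taken from `OTEL_TRACES_EXPORTER` get matched against an entry-point group. This is not the SDK's actual implementation; `resolve_traces_exporters` is a hypothetical helper, but the lookup-then-load pattern and the error message mirror what is described above.

```python
from os import environ

from pkg_resources import iter_entry_points


def resolve_traces_exporters(group="opentelemetry_traces_exporter"):
    # Names the user asked for, e.g. OTEL_TRACES_EXPORTER=jaeger
    requested = [
        name.strip()
        for name in environ.get("OTEL_TRACES_EXPORTER", "").split(",")
        if name.strip() and name.strip() != "none"
    ]
    # Names actually registered by installed exporter packages
    registered = {ep.name: ep for ep in iter_entry_points(group)}

    exporters = {}
    for name in requested:
        if name not in registered:
            # The failure reported above: the spec says "jaeger", but only
            # "jaeger_proto" and "jaeger_thrift" are registered when the
            # protocol-specific packages carry the entry points.
            raise RuntimeError(
                "Requested component '{}' not found in entry points "
                "for '{}'".format(name, group)
            )
        exporters[name] = registered[name].load()
    return exporters
```

With the `jaeger = opentelemetry.exporter.jaeger.proto.grpc:JaegerExporter` entry point added by the diff above, the spec-recommended name resolves without forcing users onto a specific protocol package.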
open-telemetry__opentelemetry-python-636
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# FIXME find a better way to avoid all those \"Expression has type \"Any\"\" errors\n# type: ignore\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\ncharacters are only alphanumeric characters and unserscores, except for the\nfirst character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. 
This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n\"default_meter_provider\" (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\n\n\nclass Configuration:\n _instance = None\n\n __slots__ = []\n\n def __new__(cls) -> \"Configuration\":\n if Configuration._instance is None:\n\n for key, value in environ.items():\n\n match = fullmatch(\n r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n )\n\n if match is not None:\n\n key = match.group(1)\n\n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n Configuration,\n key,\n property(\n fget=lambda cls, key=key: getattr(\n cls, \"_{}\".format(key)\n )\n ),\n )\n\n Configuration.__slots__.append(key)\n\n Configuration.__slots__ = tuple(Configuration.__slots__)\n\n Configuration._instance = object.__new__(cls)\n\n return cls._instance\n\n def __getattr__(self, name):\n return None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py" } ]
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# FIXME find a better way to avoid all those \"Expression has type \"Any\"\" errors\n# type: ignore\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\ncharacters are only alphanumeric characters and unserscores, except for the\nfirst character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. 
This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n\"default_meter_provider\" (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\n\n\nclass Configuration:\n _instance = None\n\n __slots__ = []\n\n def __new__(cls) -> \"Configuration\":\n if Configuration._instance is None:\n\n for key, value in environ.items():\n\n match = fullmatch(\n r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n )\n\n if match is not None:\n\n key = match.group(1)\n\n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n Configuration,\n key,\n property(\n fget=lambda cls, key=key: getattr(\n cls, \"_{}\".format(key)\n )\n ),\n )\n\n Configuration.__slots__.append(key)\n\n Configuration.__slots__ = tuple(Configuration.__slots__)\n\n Configuration._instance = object.__new__(cls)\n\n return cls._instance\n\n def __getattr__(self, name):\n return None\n\n @classmethod\n def _reset(cls):\n \"\"\"\n This method \"resets\" the global configuration attributes\n\n It is not intended to be used by production code but by testing code\n only.\n \"\"\"\n\n for slot in cls.__slots__:\n if slot in cls.__dict__.keys():\n delattr(cls, slot)\n delattr(cls, \"_{}\".format(slot))\n\n cls.__slots__ = []\n cls._instance = None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py" } ]
diff --git a/ext/opentelemetry-ext-flask/tests/base_test.py b/ext/opentelemetry-ext-flask/tests/base_test.py index 7147afd7193..42341826df0 100644 --- a/ext/opentelemetry-ext-flask/tests/base_test.py +++ b/ext/opentelemetry-ext-flask/tests/base_test.py @@ -19,6 +19,7 @@ from werkzeug.wrappers import BaseResponse from opentelemetry import trace +from opentelemetry.configuration import Configuration def expected_attributes(override_attributes): @@ -40,6 +41,10 @@ def expected_attributes(override_attributes): class InstrumentationTest: + def setUp(self): # pylint: disable=invalid-name + super().setUp() # pylint: disable=no-member + Configuration._reset() # pylint: disable=protected-access + @staticmethod def _hello_endpoint(helloid): if helloid == 500: diff --git a/ext/opentelemetry-ext-flask/tests/test_automatic.py b/ext/opentelemetry-ext-flask/tests/test_automatic.py index 04f43d6e642..b94c7b33d6b 100644 --- a/ext/opentelemetry-ext-flask/tests/test_automatic.py +++ b/ext/opentelemetry-ext-flask/tests/test_automatic.py @@ -16,7 +16,6 @@ from werkzeug.test import Client from werkzeug.wrappers import BaseResponse -from opentelemetry.configuration import Configuration from opentelemetry.ext.flask import FlaskInstrumentor from opentelemetry.test.test_base import TestBase from opentelemetry.test.wsgitestutil import WsgiTestBase @@ -29,8 +28,6 @@ class TestAutomatic(InstrumentationTest, TestBase, WsgiTestBase): def setUp(self): super().setUp() - Configuration._instance = None # pylint: disable=protected-access - Configuration.__slots__ = [] # pylint: disable=protected-access FlaskInstrumentor().instrument() self.app = flask.Flask(__name__) diff --git a/ext/opentelemetry-ext-flask/tests/test_programmatic.py b/ext/opentelemetry-ext-flask/tests/test_programmatic.py index 1075f808cb8..4e17f25fdc1 100644 --- a/ext/opentelemetry-ext-flask/tests/test_programmatic.py +++ b/ext/opentelemetry-ext-flask/tests/test_programmatic.py @@ -14,7 +14,6 @@ from flask import Flask -from opentelemetry.configuration import Configuration from opentelemetry.ext.flask import FlaskInstrumentor from opentelemetry.test.test_base import TestBase from opentelemetry.test.wsgitestutil import WsgiTestBase @@ -27,8 +26,6 @@ class TestProgrammatic(InstrumentationTest, TestBase, WsgiTestBase): def setUp(self): super().setUp() - Configuration._instance = None # pylint: disable=protected-access - Configuration.__slots__ = [] # pylint: disable=protected-access self.app = Flask(__name__) FlaskInstrumentor().instrument_app(self.app) diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py index 57b1c324c6c..ad546b0b864 100644 --- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py +++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py @@ -122,3 +122,20 @@ def __new__(cls) -> "Configuration": def __getattr__(self, name): return None + + @classmethod + def _reset(cls): + """ + This method "resets" the global configuration attributes + + It is not intended to be used by production code but by testing code + only. 
+ """ + + for slot in cls.__slots__: + if slot in cls.__dict__.keys(): + delattr(cls, slot) + delattr(cls, "_{}".format(slot)) + + cls.__slots__ = [] + cls._instance = None diff --git a/opentelemetry-api/tests/configuration/test_configuration.py b/opentelemetry-api/tests/configuration/test_configuration.py index 9688ec28b6c..c736c972622 100644 --- a/opentelemetry-api/tests/configuration/test_configuration.py +++ b/opentelemetry-api/tests/configuration/test_configuration.py @@ -20,14 +20,10 @@ class TestConfiguration(TestCase): - def setUp(self): - # This is added here to force a reload of the whole Configuration - # class, resetting its internal attributes so that each tests starts - # with a clean class. - from opentelemetry.configuration import Configuration # type: ignore - def tearDown(self): - from opentelemetry.configuration import Configuration # type: ignore + # This call resets the attributes of the Configuration class so that + # each test is executed in the same conditions. + Configuration._reset() def test_singleton(self): self.assertIsInstance(Configuration(), Configuration) @@ -72,3 +68,23 @@ def test_slots(self): def test_getattr(self): self.assertIsNone(Configuration().XYZ) + + def test_reset(self): + environ_patcher = patch.dict( + "os.environ", # type: ignore + {"OPENTELEMETRY_PYTHON_TRACER_PROVIDER": "tracer_provider"}, + ) + + environ_patcher.start() + + self.assertEqual( + Configuration().TRACER_PROVIDER, "tracer_provider" + ) # pylint: disable=no-member + + environ_patcher.stop() + + Configuration._reset() + + self.assertIsNone( + Configuration().TRACER_PROVIDER + ) # pylint: disable=no-member
Add a standard way to "reset" a Configuration object for testing

It is a common occurrence in tests that the global `Configuration` object needs to be "reset" between tests, meaning that its attributes need to be set back to their original values. Since the `Configuration` object is immutable by design, an additional mechanism, not available to production code, is needed to perform this action. The need for this feature was mentioned in a [conversation](https://github.com/open-telemetry/opentelemetry-python/pull/630#discussion_r418343720) in #630.
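For reference, a short usage sketch of the `_reset` helper added above, showing how a test suite can guarantee isolation between tests. The class and method names here are illustrative; the pattern follows the updated `test_configuration.py` in the diff.

```python
from unittest import TestCase
from unittest.mock import patch

from opentelemetry.configuration import Configuration


class ConfigurationIsolationTest(TestCase):
    def tearDown(self):
        # Drop the dynamically created attributes and the cached singleton
        # so the next test re-reads the environment from scratch.
        Configuration._reset()  # pylint: disable=protected-access

    def test_reads_tracer_provider_from_environment(self):
        with patch.dict(
            "os.environ",
            {"OPENTELEMETRY_PYTHON_TRACER_PROVIDER": "my_tracer_provider"},
        ):
            # The first instantiation snapshots the environment.
            self.assertEqual(
                Configuration().TRACER_PROVIDER, "my_tracer_provider"
            )

    def test_unset_attribute_is_none(self):
        # After _reset() in tearDown, and assuming the variable is not set
        # in the real environment, the attribute falls back to None.
        self.assertIsNone(Configuration().TRACER_PROVIDER)
```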
adamchainz__django-mysql-486
[ { "content": "# -*- coding:utf-8 -*-\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals,\n)\n\nfrom django.core.checks import Tags, Warning, register\nfrom django.db import DEFAULT_DB_ALIAS, connections\n\nfrom django_mysql.utils import collapse_spaces\n\n\ndef register_checks():\n register(Tags.compatibility)(check_variables)\n\n\ndef check_variables(app_configs, **kwargs):\n errors = []\n\n for alias, connection in mysql_connections():\n with connection.cursor() as cursor:\n cursor.execute(\"\"\"SELECT @@sql_mode,\n @@innodb_strict_mode,\n @@character_set_connection\"\"\")\n variables = cursor.fetchone()\n sql_mode, innodb_strict_mode, character_set_connection = variables\n\n modes = set(sql_mode.split(','))\n if not (modes & {'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES'}):\n errors.append(strict_mode_warning(alias))\n\n if not innodb_strict_mode:\n errors.append(innodb_strict_mode_warning(alias))\n\n if character_set_connection != 'utf8mb4':\n errors.append(utf8mb4_warning(alias))\n\n return errors\n\n\ndef strict_mode_warning(alias):\n message = \"MySQL Strict Mode is not set for database connection '{}'\"\n hint = collapse_spaces(\"\"\"\n MySQL's Strict Mode fixes many data integrity problems in MySQL, such\n as data truncation upon insertion, by escalating warnings into errors.\n It is strongly recommended you activate it. See:\n https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w001-strict-mode\n \"\"\")\n return Warning(\n message.format(alias),\n hint=hint,\n id='django_mysql.W001',\n )\n\n\ndef innodb_strict_mode_warning(alias):\n message = \"InnoDB Strict Mode is not set for database connection '{}'\"\n hint = collapse_spaces(\"\"\"\n InnoDB Strict Mode escalates several warnings around InnoDB-specific\n statements into errors. It's recommended you activate this, but it's\n not very likely to affect you if you don't. See:\n https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w002-innodb-strict-mode\n \"\"\")\n\n return Warning(\n message.format(alias),\n hint=hint,\n id='django_mysql.W002',\n )\n\n\ndef utf8mb4_warning(alias):\n message = \"The character set is not utf8mb4 for database connection '{}'\"\n hint = collapse_spaces(\"\"\"\n The default 'utf8' character set does not include support for all\n Unicode characters. It's strongly recommended you move to use\n 'utf8mb4'. See:\n https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w003-utf8mb4\n \"\"\")\n\n return Warning(\n message.format(alias),\n hint=hint,\n id='django_mysql.W003',\n )\n\n\ndef mysql_connections():\n conn_names = [DEFAULT_DB_ALIAS] + list(\n set(connections) - {DEFAULT_DB_ALIAS},\n )\n for alias in conn_names:\n connection = connections[alias]\n if not hasattr(connection, 'mysql_version'):\n continue # pragma: no cover\n\n yield alias, connection\n", "path": "django_mysql/checks.py" } ]
[ { "content": "# -*- coding:utf-8 -*-\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals,\n)\n\nfrom django.core.checks import Tags, Warning, register\nfrom django.db import DEFAULT_DB_ALIAS, connections\n\nfrom django_mysql.utils import collapse_spaces\n\n\ndef register_checks():\n register(Tags.compatibility)(check_variables)\n\n\ndef check_variables(app_configs, **kwargs):\n errors = []\n\n for alias, connection in mysql_connections():\n with connection.temporary_connection() as cursor:\n cursor.execute(\"\"\"SELECT @@sql_mode,\n @@innodb_strict_mode,\n @@character_set_connection\"\"\")\n variables = cursor.fetchone()\n sql_mode, innodb_strict_mode, character_set_connection = variables\n\n modes = set(sql_mode.split(','))\n if not (modes & {'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES'}):\n errors.append(strict_mode_warning(alias))\n\n if not innodb_strict_mode:\n errors.append(innodb_strict_mode_warning(alias))\n\n if character_set_connection != 'utf8mb4':\n errors.append(utf8mb4_warning(alias))\n\n return errors\n\n\ndef strict_mode_warning(alias):\n message = \"MySQL Strict Mode is not set for database connection '{}'\"\n hint = collapse_spaces(\"\"\"\n MySQL's Strict Mode fixes many data integrity problems in MySQL, such\n as data truncation upon insertion, by escalating warnings into errors.\n It is strongly recommended you activate it. See:\n https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w001-strict-mode\n \"\"\")\n return Warning(\n message.format(alias),\n hint=hint,\n id='django_mysql.W001',\n )\n\n\ndef innodb_strict_mode_warning(alias):\n message = \"InnoDB Strict Mode is not set for database connection '{}'\"\n hint = collapse_spaces(\"\"\"\n InnoDB Strict Mode escalates several warnings around InnoDB-specific\n statements into errors. It's recommended you activate this, but it's\n not very likely to affect you if you don't. See:\n https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w002-innodb-strict-mode\n \"\"\")\n\n return Warning(\n message.format(alias),\n hint=hint,\n id='django_mysql.W002',\n )\n\n\ndef utf8mb4_warning(alias):\n message = \"The character set is not utf8mb4 for database connection '{}'\"\n hint = collapse_spaces(\"\"\"\n The default 'utf8' character set does not include support for all\n Unicode characters. It's strongly recommended you move to use\n 'utf8mb4'. See:\n https://django-mysql.readthedocs.io/en/latest/checks.html#django-mysql-w003-utf8mb4\n \"\"\")\n\n return Warning(\n message.format(alias),\n hint=hint,\n id='django_mysql.W003',\n )\n\n\ndef mysql_connections():\n conn_names = [DEFAULT_DB_ALIAS] + list(\n set(connections) - {DEFAULT_DB_ALIAS},\n )\n for alias in conn_names:\n connection = connections[alias]\n if not hasattr(connection, 'mysql_version'):\n continue # pragma: no cover\n\n yield alias, connection\n", "path": "django_mysql/checks.py" } ]
diff --git a/django_mysql/checks.py b/django_mysql/checks.py index 9ac65235..b483b650 100644 --- a/django_mysql/checks.py +++ b/django_mysql/checks.py @@ -17,7 +17,7 @@ def check_variables(app_configs, **kwargs): errors = [] for alias, connection in mysql_connections(): - with connection.cursor() as cursor: + with connection.temporary_connection() as cursor: cursor.execute("""SELECT @@sql_mode, @@innodb_strict_mode, @@character_set_connection""")
Dangling connection created during system check may become unusable

*Summary*: The MySQL connection created during the system check is not closed in a timely manner. This connection might (mysteriously) become unusable afterwards (for reasons I could not fully understand for now), which blocks database access in application logic.

*Description*: I'm using Django with Celery. Today I noticed that any task that accesses the database through the Django ORM just hangs forever (i.e. tasks only start but never finish). After some digging I realized it's the database connection that blocks forever and never comes back. After another really painful debugging process, I figured out that, during Django's system checking process, django-mysql created a persistent database connection rather than a temporary one, which then gets re-used in application code. But somehow its connection is dropped, which causes any code that implicitly depends on it to block forever.

Specifically, I think the following code in `django_mysql/checks.py` should use `BaseDatabaseWrapper.temporary_connection()` to ensure the connection is closed cleanly afterwards.

```python
def check_variables(app_configs, **kwargs):
    errors = []

    for alias, connection in mysql_connections():
        with connection.cursor() as cursor:  # connection.temporary_connection()
            cursor.execute("""SELECT @@sql_mode,
                              @@innodb_strict_mode,
                              @@character_set_connection""")
            # ...
```

(Sorry if this poor bug report seems rushed, I'm really tired (it's 4 AM and my brain isn't working...).)

*Django Version*: Django 2.0.6

*Database and version used*: mysqld Ver 5.7.22 for Linux on x86_64 (MySQL Community Server (GPL))

*Version*: Django-MySQL 2.2.2
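A minimal sketch of the suggested fix, standalone rather than django-mysql's actual code (`read_server_variables` is an illustrative helper name). `temporary_connection()` yields a cursor and, on exiting the block, closes any connection it had to open, so the system check no longer leaves a live connection behind for application code to inherit.

```python
from django.db import connections


def read_server_variables(alias):
    connection = connections[alias]
    # Unlike connection.cursor(), temporary_connection() cleans up after
    # itself: a connection opened just for this block is closed on exit.
    with connection.temporary_connection() as cursor:
        cursor.execute(
            "SELECT @@sql_mode,"
            " @@innodb_strict_mode,"
            " @@character_set_connection"
        )
        return cursor.fetchone()
```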
napari__napari-1371
[ { "content": "try:\n from ._version import version as __version__\nexcept ImportError:\n __version__ = \"not-installed\"\n\nimport os\nfrom distutils.version import StrictVersion\nfrom pathlib import Path\n\ntry:\n from qtpy import API_NAME\nexcept Exception as e:\n if 'No Qt bindings could be found' in str(e):\n raise type(e)(\n \"No Qt bindings could be found.\\n\\nnapari requires either PyQt5 or\"\n \" PySide2 to be installed in the environment.\\nTo install the \"\n 'default backend (currently PyQt5), run \"pip install napari[all]\"'\n '\\nYou may also use \"pip install napari[pyside2]\" for Pyside2, '\n 'or \"pip install napari[pyqt5]\" for PyQt5'\n ) from e\n raise\n\n\nif API_NAME == 'PySide2':\n # Set plugin path appropriately if using PySide2. This is a bug fix\n # for when both PyQt5 and Pyside2 are installed\n import PySide2\n\n os.environ['QT_PLUGIN_PATH'] = str(\n Path(PySide2.__file__).parent / 'Qt' / 'plugins'\n )\n\nfrom qtpy import QtCore\n\n# When QT is not the specific version, we raise a warning:\nfrom warnings import warn\n\nif StrictVersion(QtCore.__version__) < StrictVersion('5.12.3'):\n warn_message = f\"\"\"\n napari was tested with QT library `>=5.12.3`.\n The version installed is {QtCore.__version__}. Please report any issues with this\n specific QT version at https://github.com/Napari/napari/issues.\n \"\"\"\n warn(message=warn_message)\n\nfrom vispy import app\nimport logging\n\n# set vispy application to the appropriate qt backend\napp.use_app(API_NAME)\ndel app\n# set vispy logger to show warning and errors only\nvispy_logger = logging.getLogger('vispy')\nvispy_logger.setLevel(logging.WARNING)\n\nfrom .viewer import Viewer\nfrom .plugins.io import save_layers\n\n# Note that importing _viewer_key_bindings is needed as the Viewer gets\n# decorated with keybindings during that process, but it is not directly needed\n# by our users and so is deleted below\nfrom . import _viewer_key_bindings # noqa: F401\nfrom .view_layers import (\n view_path,\n view_image,\n view_labels,\n view_surface,\n view_shapes,\n view_points,\n view_vectors,\n)\nfrom ._qt import gui_qt\nfrom .utils import sys_info, _magicgui\n\n# register napari object types with magicgui if it is installed\n_magicgui.register_types_with_magicgui()\n\ndel _magicgui\ndel _viewer_key_bindings\n", "path": "napari/__init__.py" } ]
[ { "content": "try:\n from ._version import version as __version__\nexcept ImportError:\n __version__ = \"not-installed\"\n\nimport os\nfrom distutils.version import StrictVersion\nfrom pathlib import Path\n\ntry:\n from qtpy import API_NAME\nexcept Exception as e:\n if 'No Qt bindings could be found' in str(e):\n raise type(e)(\n \"No Qt bindings could be found.\\n\\nnapari requires either PyQt5 or\"\n \" PySide2 to be installed in the environment.\\nTo install the \"\n 'default backend (currently PyQt5), run \"pip install napari[all]\"'\n '\\nYou may also use \"pip install napari[pyside2]\" for Pyside2, '\n 'or \"pip install napari[pyqt5]\" for PyQt5'\n ) from e\n raise\n\n\nif API_NAME == 'PySide2':\n # Set plugin path appropriately if using PySide2. This is a bug fix\n # for when both PyQt5 and Pyside2 are installed\n import PySide2\n\n os.environ['QT_PLUGIN_PATH'] = str(\n Path(PySide2.__file__).parent / 'Qt' / 'plugins'\n )\n\nfrom qtpy import QtCore\n\n# When QT is not the specific version, we raise a warning:\nfrom warnings import warn\n\nif StrictVersion(QtCore.__version__) < StrictVersion('5.12.3'):\n warn_message = f\"\"\"\n napari was tested with QT library `>=5.12.3`.\n The version installed is {QtCore.__version__}. Please report any issues with this\n specific QT version at https://github.com/Napari/napari/issues.\n \"\"\"\n warn(message=warn_message)\n\nfrom vispy import app\nimport logging\n\n# set vispy application to the appropriate qt backend\napp.use_app(API_NAME)\ndel app\n# set vispy logger to show warning and errors only\nvispy_logger = logging.getLogger('vispy')\nvispy_logger.setLevel(logging.WARNING)\n\nfrom .viewer import Viewer\nfrom .plugins.io import save_layers\n\n# Note that importing _viewer_key_bindings is needed as the Viewer gets\n# decorated with keybindings during that process, but it is not directly needed\n# by our users and so is deleted below\nfrom . import _viewer_key_bindings # noqa: F401\nfrom .view_layers import (\n view_path,\n view_image,\n view_labels,\n view_surface,\n view_shapes,\n view_points,\n view_vectors,\n)\nfrom ._qt import gui_qt\nfrom .utils import sys_info, _magicgui\n\n# register napari object types with magicgui if it is installed\n_magicgui.register_types_with_magicgui()\n\n\n# this unused import is here to fix a very strange bug.\n# there is some mysterious magical goodness in scipy stats that needs\n# to be imported early.\n# see: https://github.com/napari/napari/issues/925\n# see: https://github.com/napari/napari/issues/1347\nfrom scipy import stats # noqa: F401\n\ndel _magicgui\ndel stats\ndel _viewer_key_bindings\n", "path": "napari/__init__.py" } ]
diff --git a/docs/release/release_0_3_5.md b/docs/release/release_0_3_5.md index 49135c5a43f..de028686f79 100644 --- a/docs/release/release_0_3_5.md +++ b/docs/release/release_0_3_5.md @@ -28,13 +28,13 @@ including plans for the future. ## Improvements - Factor out ImageSlice and ImageView from Image (#1343) - ## Bug Fixes - Fix warning for python 3.8 (#1335) - Fix range slider position (#1344) - Fix Linux and Windows key hold detection (#1350) - Fix crash when selecting all points (#1358) - Fix deleting layers changing dims (#1359) +- Revert "remove scipy.stats import (#1250)" (#1371) ## Build Tools and Support diff --git a/napari/__init__.py b/napari/__init__.py index 387a72b8d5a..2d42fee9e68 100644 --- a/napari/__init__.py +++ b/napari/__init__.py @@ -75,5 +75,14 @@ # register napari object types with magicgui if it is installed _magicgui.register_types_with_magicgui() + +# this unused import is here to fix a very strange bug. +# there is some mysterious magical goodness in scipy stats that needs +# to be imported early. +# see: https://github.com/napari/napari/issues/925 +# see: https://github.com/napari/napari/issues/1347 +from scipy import stats # noqa: F401 + del _magicgui +del stats del _viewer_key_bindings
Napari 0.3.4 release crashes when switching to volume rendering

## 🐛 Bug

Napari 0.3.4 (release version) crashes when trying to render volume in 3D.

## To Reproduce

Steps to reproduce the behavior:

1. Load a volume
2. Display and navigate through slices in 2D
3. Switch to 3D -> Crash

![napari_vol_crash](https://user-images.githubusercontent.com/2210044/84094896-3232af00-aa41-11ea-8477-87c4d5257e21.gif)

```
(stardist) λ napari
WARNING: Error drawing visual <Volume at 0x1b6364c0d48>
10:17:55 WARNING Error drawing visual <Volume at 0x1b6364c0d48>
WARNING: Traceback (most recent call last):
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\OpenGL\latebind.py", line 43, in __call__
    return self._finalCall( *args, **named )
TypeError: 'NoneType' object is not callable

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\app\backends\_qt.py", line 825, in paintGL
    self._vispy_canvas.events.draw(region=None)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\util\event.py", line 455, in __call__
    self._invoke_callback(cb, event)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\util\event.py", line 475, in _invoke_callback
    self, cb_event=(cb, event))
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\util\event.py", line 471, in _invoke_callback
    cb(event)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\scene\canvas.py", line 217, in on_draw
    self._draw_scene()
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\scene\canvas.py", line 266, in _draw_scene
    self.draw_visual(self.scene)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\scene\canvas.py", line 304, in draw_visual
    node.draw()
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\scene\visuals.py", line 99, in draw
    self._visual_superclass.draw(self)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\visuals\visual.py", line 443, in draw
    self._vshare.index_buffer)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\visuals\shaders\program.py", line 101, in draw
    Program.draw(self, *args, **kwargs)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\program.py", line 533, in draw
    canvas.context.flush_commands()
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\context.py", line 176, in flush_commands
    self.glir.flush(self.shared.parser)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 572, in flush
    self._shared.flush(parser)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 494, in flush
    parser.parse(self._filter(self.clear(), parser))
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 819, in parse
    self._parse(command)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 789, in _parse
    ob.set_size(*args) # Texture[1D, 2D, 3D], RenderBuffer
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 1624, in set_size
    gl.GL_BYTE, shape[:3])
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 1573, in glTexImage3D
    width, height, depth, border, format, type, None)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\OpenGL\latebind.py", line 47, in __call__
    return self._finalCall( *args, **named )
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\OpenGL\wrapper.py", line 882, in wrapperCall
    result = wrappedOperation( *cArguments )
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\OpenGL\platform\baseplatform.py", line 425, in __call__
    self.__name__, self.__name__,
OpenGL.error.NullFunctionError: Attempt to call an undefined function glTexImage3D, check for bool(glTexImage3D) before calling
10:17:56 WARNING Traceback (most recent call last):
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\OpenGL\latebind.py", line 43, in __call__
    return self._finalCall( *args, **named )
TypeError: 'NoneType' object is not callable

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\app\backends\_qt.py", line 825, in paintGL
    self._vispy_canvas.events.draw(region=None)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\util\event.py", line 455, in __call__
    self._invoke_callback(cb, event)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\util\event.py", line 475, in _invoke_callback
    self, cb_event=(cb, event))
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\util\event.py", line 471, in _invoke_callback
    cb(event)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\scene\canvas.py", line 217, in on_draw
    self._draw_scene()
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\scene\canvas.py", line 266, in _draw_scene
    self.draw_visual(self.scene)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\scene\canvas.py", line 304, in draw_visual
    node.draw()
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\scene\visuals.py", line 99, in draw
    self._visual_superclass.draw(self)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\visuals\visual.py", line 443, in draw
    self._vshare.index_buffer)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\visuals\shaders\program.py", line 101, in draw
    Program.draw(self, *args, **kwargs)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\program.py", line 533, in draw
    canvas.context.flush_commands()
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\context.py", line 176, in flush_commands
    self.glir.flush(self.shared.parser)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 572, in flush
    self._shared.flush(parser)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 494, in flush
    parser.parse(self._filter(self.clear(), parser))
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 819, in parse
    self._parse(command)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 789, in _parse
    ob.set_size(*args) # Texture[1D, 2D, 3D], RenderBuffer
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 1624, in set_size
    gl.GL_BYTE, shape[:3])
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\vispy\gloo\glir.py", line 1573, in glTexImage3D
    width, height, depth, border, format, type, None)
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\OpenGL\latebind.py", line 47, in __call__
    return self._finalCall( *args, **named )
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\OpenGL\wrapper.py", line 882, in wrapperCall
    result = wrappedOperation( *cArguments )
  File "c:\users\volker\anaconda3\envs\stardist\lib\site-packages\OpenGL\platform\baseplatform.py", line 425, in __call__
    self.__name__, self.__name__,
OpenGL.error.NullFunctionError: Attempt to call an undefined function glTexImage3D, check for bool(glTexImage3D) before calling
```

## Expected behavior

The same volume renders fine in an earlier version of napari.

## Environment

- Please copy and paste the information at napari info option in help menubar here:

```
napari: 0.3.4
Platform: Windows-10-10.0.18362-SP0
Python: 3.7.7 (default, Apr 15 2020, 05:09:04) [MSC v.1916 64 bit (AMD64)]
Qt: 5.14.2
PyQt5: 5.14.2
NumPy: 1.18.1
SciPy: 1.3.1
Dask: 2.18.0
VisPy: 0.6.4
GL version: 4.6.0 - Build 26.20.100.7812
MAX_TEXTURE_SIZE: 16384
Plugins:
- napari-plugin-engine: 0.1.6
- svg: 0.1.3
```

This was pip installed into an existing conda environment from the PyPI release.

## Additional context

In the same environment, when I open an IPython console before switching to volume rendering (same volume), **napari does not crash!**

![napari_vol_no_crash](https://user-images.githubusercontent.com/2210044/84094918-3eb70780-aa41-11ea-860b-2bfb174eba83.gif)

Also, the volume renders fine in one of my older napari installations/environments that has the same version of vispy, which rules out my initial thought that this would be a vispy issue. **My current guess is that this may be PyQt-related.** The environment below works. Note a few things are different, notably PySide instead of PyQt.

```
napari: 0.3.1+12.g0cd943c
Platform: Windows-10-10.0.18362-SP0
Python: 3.7.7 (default, May 6 2020, 11:45:54) [MSC v.1916 64 bit (AMD64)]
Qt: 5.14.2
PySide2: 5.14.2.1
NumPy: 1.18.4
SciPy: 1.4.1
Dask: 2.16.0
VisPy: 0.6.4
GL version: 4.6.0 - Build 26.20.100.7812
MAX_TEXTURE_SIZE: 16384
Plugins:
- napari-plugin-engine: 0.1.5
- svg: 0.1.2
```
ytdl-org__youtube-dl-18343
[ { "content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n float_or_none,\n qualities,\n ExtractorError,\n)\n\n\nclass GfycatIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?gfycat\\.com/(?:ifr/|gifs/detail/)?(?P<id>[^/?#]+)'\n _TESTS = [{\n 'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',\n 'info_dict': {\n 'id': 'DeadlyDecisiveGermanpinscher',\n 'ext': 'mp4',\n 'title': 'Ghost in the Shell',\n 'timestamp': 1410656006,\n 'upload_date': '20140914',\n 'uploader': 'anonymous',\n 'duration': 10.4,\n 'view_count': int,\n 'like_count': int,\n 'dislike_count': int,\n 'categories': list,\n 'age_limit': 0,\n }\n }, {\n 'url': 'http://gfycat.com/ifr/JauntyTimelyAmazontreeboa',\n 'info_dict': {\n 'id': 'JauntyTimelyAmazontreeboa',\n 'ext': 'mp4',\n 'title': 'JauntyTimelyAmazontreeboa',\n 'timestamp': 1411720126,\n 'upload_date': '20140926',\n 'uploader': 'anonymous',\n 'duration': 3.52,\n 'view_count': int,\n 'like_count': int,\n 'dislike_count': int,\n 'categories': list,\n 'age_limit': 0,\n }\n }, {\n 'url': 'https://gfycat.com/gifs/detail/UnconsciousLankyIvorygull',\n 'only_matching': True\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n gfy = self._download_json(\n 'http://gfycat.com/cajax/get/%s' % video_id,\n video_id, 'Downloading video info')\n if 'error' in gfy:\n raise ExtractorError('Gfycat said: ' + gfy['error'], expected=True)\n gfy = gfy['gfyItem']\n\n title = gfy.get('title') or gfy['gfyName']\n description = gfy.get('description')\n timestamp = int_or_none(gfy.get('createDate'))\n uploader = gfy.get('userName')\n view_count = int_or_none(gfy.get('views'))\n like_count = int_or_none(gfy.get('likes'))\n dislike_count = int_or_none(gfy.get('dislikes'))\n age_limit = 18 if gfy.get('nsfw') == '1' else 0\n\n width = int_or_none(gfy.get('width'))\n height = int_or_none(gfy.get('height'))\n fps = int_or_none(gfy.get('frameRate'))\n num_frames = int_or_none(gfy.get('numFrames'))\n\n duration = float_or_none(num_frames, fps) if num_frames and fps else None\n\n categories = gfy.get('tags') or gfy.get('extraLemmas') or []\n\n FORMATS = ('gif', 'webm', 'mp4')\n quality = qualities(FORMATS)\n\n formats = []\n for format_id in FORMATS:\n video_url = gfy.get('%sUrl' % format_id)\n if not video_url:\n continue\n filesize = int_or_none(gfy.get('%sSize' % format_id))\n formats.append({\n 'url': video_url,\n 'format_id': format_id,\n 'width': width,\n 'height': height,\n 'fps': fps,\n 'filesize': filesize,\n 'quality': quality(format_id),\n })\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'timestamp': timestamp,\n 'uploader': uploader,\n 'duration': duration,\n 'view_count': view_count,\n 'like_count': like_count,\n 'dislike_count': dislike_count,\n 'categories': categories,\n 'age_limit': age_limit,\n 'formats': formats,\n }\n", "path": "youtube_dl/extractor/gfycat.py" } ]
[ { "content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n float_or_none,\n qualities,\n ExtractorError,\n)\n\n\nclass GfycatIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?gfycat\\.com/(?:ifr/|gifs/detail/)?(?P<id>[^/?#]+)'\n _TESTS = [{\n 'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',\n 'info_dict': {\n 'id': 'DeadlyDecisiveGermanpinscher',\n 'ext': 'mp4',\n 'title': 'Ghost in the Shell',\n 'timestamp': 1410656006,\n 'upload_date': '20140914',\n 'uploader': 'anonymous',\n 'duration': 10.4,\n 'view_count': int,\n 'like_count': int,\n 'dislike_count': int,\n 'categories': list,\n 'age_limit': 0,\n }\n }, {\n 'url': 'http://gfycat.com/ifr/JauntyTimelyAmazontreeboa',\n 'info_dict': {\n 'id': 'JauntyTimelyAmazontreeboa',\n 'ext': 'mp4',\n 'title': 'JauntyTimelyAmazontreeboa',\n 'timestamp': 1411720126,\n 'upload_date': '20140926',\n 'uploader': 'anonymous',\n 'duration': 3.52,\n 'view_count': int,\n 'like_count': int,\n 'dislike_count': int,\n 'categories': list,\n 'age_limit': 0,\n }\n }, {\n 'url': 'https://gfycat.com/gifs/detail/UnconsciousLankyIvorygull',\n 'only_matching': True\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n gfy = self._download_json(\n 'https://api.gfycat.com/v1/gfycats/%s' % video_id,\n video_id, 'Downloading video info')\n if 'error' in gfy:\n raise ExtractorError('Gfycat said: ' + gfy['error'], expected=True)\n gfy = gfy['gfyItem']\n\n title = gfy.get('title') or gfy['gfyName']\n description = gfy.get('description')\n timestamp = int_or_none(gfy.get('createDate'))\n uploader = gfy.get('userName')\n view_count = int_or_none(gfy.get('views'))\n like_count = int_or_none(gfy.get('likes'))\n dislike_count = int_or_none(gfy.get('dislikes'))\n age_limit = 18 if gfy.get('nsfw') == '1' else 0\n\n width = int_or_none(gfy.get('width'))\n height = int_or_none(gfy.get('height'))\n fps = int_or_none(gfy.get('frameRate'))\n num_frames = int_or_none(gfy.get('numFrames'))\n\n duration = float_or_none(num_frames, fps) if num_frames and fps else None\n\n categories = gfy.get('tags') or gfy.get('extraLemmas') or []\n\n FORMATS = ('gif', 'webm', 'mp4')\n quality = qualities(FORMATS)\n\n formats = []\n for format_id in FORMATS:\n video_url = gfy.get('%sUrl' % format_id)\n if not video_url:\n continue\n filesize = int_or_none(gfy.get('%sSize' % format_id))\n formats.append({\n 'url': video_url,\n 'format_id': format_id,\n 'width': width,\n 'height': height,\n 'fps': fps,\n 'filesize': filesize,\n 'quality': quality(format_id),\n })\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'timestamp': timestamp,\n 'uploader': uploader,\n 'duration': duration,\n 'view_count': view_count,\n 'like_count': like_count,\n 'dislike_count': dislike_count,\n 'categories': categories,\n 'age_limit': age_limit,\n 'formats': formats,\n }\n", "path": "youtube_dl/extractor/gfycat.py" } ]
diff --git a/youtube_dl/extractor/gfycat.py b/youtube_dl/extractor/gfycat.py index a0670b6456a..c1b36a59b51 100644 --- a/youtube_dl/extractor/gfycat.py +++ b/youtube_dl/extractor/gfycat.py @@ -53,7 +53,7 @@ def _real_extract(self, url): video_id = self._match_id(url) gfy = self._download_json( - 'http://gfycat.com/cajax/get/%s' % video_id, + 'https://api.gfycat.com/v1/gfycats/%s' % video_id, video_id, 'Downloading video info') if 'error' in gfy: raise ExtractorError('Gfycat said: ' + gfy['error'], expected=True)
Gfycat cajax json endpoint is gone

### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.11.23*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.

- [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.11.23**

### Before submitting an *issue* make sure you have:

- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser

### What is the purpose of your *issue*?

- [x] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other

---

### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:

Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):

```
$ youtube-dl https://gfycat.com/belovedsparseamericanbobtail -v
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['https://gfycat.com/belovedsparseamericanbobtail', '-v']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2018.11.23
[debug] Python version 3.6.5 (CPython) - Darwin-16.7.0-x86_64-i386-64bit
[debug] exe versions: ffmpeg 3.2.2, ffprobe 3.2.2
[debug] Proxy map: {}
[Gfycat] belovedsparseamericanbobtail: Downloading video info
ERROR: Unable to download JSON metadata: HTTP Error 404: Not Found (caused by <HTTPError 404: 'Not Found'>); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
  File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 605, in _request_webpage
    return self._downloader.urlopen(url_or_request)
  File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 2211, in urlopen
    return self._opener.open(req, timeout=self._socket_timeout)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 532, in open
    response = meth(req, response)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 642, in http_response
    'http', request, response, code, msg, hdrs)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 564, in error
    result = self._call_chain(*args)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 504, in _call_chain
    result = func(*args)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 756, in http_error_302
    return self.parent.open(new, timeout=req.timeout)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 532, in open
    response = meth(req, response)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 642, in http_response
    'http', request, response, code, msg, hdrs)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 570, in error
    return self._call_chain(*args)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 504, in _call_chain
    result = func(*args)
  File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py", line 650, in http_error_default
    raise HTTPError(req.full_url, code, msg, hdrs, fp)
```

---

### Description of your *issue*, suggested solution and other information

Gfycat downloads no longer work because Gfycat removed the API youtube-dl uses:

> Yes we announced deprecation of this endpoint 1.5 years ago. Are you using it for an app still?
>
> This is the link to our current API. https://developers.gfycat.com/api/#introduction

https://www.reddit.com/r/gfycat/comments/a17ewc/embeded_gfycats_returning_failed_to_load_resource/eanbmrh/

So if you wanted to get https://gfycat.com/belovedsparseamericanbobtail then `youtube-dl` would look for the meta at https://gfycat.com/cajax/get/belovedsparseamericanbobtail (you can try the link, it'll 404).

https://github.com/rg3/youtube-dl/blob/d9df8f120b325766181fb474a8c534e51df78f17/youtube_dl/extractor/gfycat.py#L55-L57

It's not obvious to me how to fix this, because the new API uses an OAuth2 bearer token, and AFAIK there isn't a key store built into youtube-dl. The closest thing looks like the `.netrc` file support, or possibly a custom [config file entry](https://github.com/rg3/youtube-dl#configuration).
watchdogpolska__feder-349
[ { "content": "from atom.views import (ActionMessageMixin, ActionView, CreateMessageMixin,\n DeleteMessageMixin, UpdateMessageMixin)\nfrom braces.views import (FormValidMessageMixin, PrefetchRelatedMixin,\n SelectRelatedMixin, UserFormKwargsMixin)\nfrom cached_property import cached_property\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (CreateView, DeleteView, DetailView, FormView,\n UpdateView)\nfrom django_filters.views import FilterView\n\nfrom feder.cases.models import Case\nfrom feder.main.mixins import (AttrPermissionRequiredMixin,\n RaisePermissionRequiredMixin)\nfrom .filters import TaskFilter\nfrom .forms import AnswerFormSet, SurveyForm, TaskForm\nfrom .models import Survey, Task\n\nDONE_MESSAGE_TEXT = _(\"Already done the job. If you want to change the answer - delete answers.\")\n\nTHANK_TEXT = _(\"Thank you for your submission. It is approaching us to know the \" +\n \"truth, by obtaining reliable data.\")\n\nEXHAUSTED_TEXT = _(\"Thank you for your help. Unfortunately, all the tasks \" +\n \"for you have been exhausted.\")\n\n\nclass TaskListView(SelectRelatedMixin, FilterView):\n filterset_class = TaskFilter\n model = Task\n select_related = ['case', 'questionary']\n paginate_by = 25\n\n def get_context_data(self, **kwargs):\n context = super(TaskListView, self).get_context_data(**kwargs)\n context['stats'] = self.object_list.survey_stats()\n return context\n\n\nclass TaskDetailView(SelectRelatedMixin, PrefetchRelatedMixin, DetailView):\n model = Task\n select_related = ['case__monitoring', 'case__institution', 'questionary']\n prefetch_related = ['survey_set', 'questionary__question_set']\n\n def get_user_survey(self):\n try:\n return (self.object.survey_set.with_full_answer().\n of_user(self.request.user, self.request.light_user).get())\n except Survey.DoesNotExist:\n return None\n\n def get_context_data(self, **kwargs):\n context = super(TaskDetailView, self).get_context_data(**kwargs)\n context['formset'] = AnswerFormSet(questionary=self.object.questionary)\n context['user_survey'] = self.get_user_survey()\n return context\n\n\nclass TaskSurveyView(SelectRelatedMixin, PrefetchRelatedMixin, DetailView):\n model = Task\n select_related = ['case__monitoring', 'case__institution', 'questionary', ]\n prefetch_related = ['questionary__question_set']\n template_name_suffix = '_survey'\n\n def get_context_data(self, **kwargs):\n context = super(TaskSurveyView, self).get_context_data(**kwargs)\n survey_list = (Survey.objects.for_task(self.object).with_user().with_full_answer().all())\n context['survey_list'] = survey_list\n user_survey_list = [x for x in survey_list if x.user == self.request.user] # TODO: Lazy\n context['user_survey'] = user_survey_list[0] if user_survey_list else None\n return context\n\n\nclass TaskCreateView(RaisePermissionRequiredMixin, UserFormKwargsMixin,\n CreateMessageMixin, CreateView):\n model = Task\n form_class = TaskForm\n permission_required = 'monitorings.add_task'\n\n @cached_property\n def case(self):\n return get_object_or_404(Case.objects.select_related('monitoring'),\n pk=self.kwargs['case'])\n\n def get_permission_object(self):\n return self.case.monitoring\n\n def get_form_kwargs(self):\n kw = super(TaskCreateView, self).get_form_kwargs()\n kw['case'] = self.case\n return kw\n\n def 
get_context_data(self, **kwargs):\n context = super(TaskCreateView, self).get_context_data(**kwargs)\n context['case'] = self.case\n return context\n\n\nclass TaskUpdateView(AttrPermissionRequiredMixin, UserFormKwargsMixin,\n UpdateMessageMixin, FormValidMessageMixin, UpdateView):\n model = Task\n form_class = TaskForm\n permission_required = 'change_task'\n permission_attribute = 'case__monitoring'\n\n\nclass TaskDeleteView(AttrPermissionRequiredMixin, DeleteMessageMixin, DeleteView):\n model = Task\n success_url = reverse_lazy('tasks:list')\n permission_required = 'delete_task'\n permission_attribute = 'case__monitoring'\n\n\nclass SurveyDeleteView(LoginRequiredMixin, DeleteMessageMixin, DeleteView):\n model = Survey\n slug_url_kwarg = 'task_id'\n slug_field = 'task_id'\n\n def get_queryset(self, *args, **kwargs):\n qs = super(SurveyDeleteView, self).get_queryset()\n return qs.of_user(self.request.user, self.request.light_user).with_full_answer()\n\n def get_success_url(self):\n return self.object.task.get_absolute_url()\n\n\nclass SurveySelectView(AttrPermissionRequiredMixin, ActionMessageMixin,\n SelectRelatedMixin, ActionView): # TODO: Write test\n model = Survey\n template_name_suffix = '_select'\n select_related = ['task__case__monitoring', ]\n permission_required = 'monitorings.select_survey'\n permission_attribute = 'task__case__monitoring'\n direction = None\n change = {'up': 1, 'down': -1}\n\n def action(self, *args, **kwargs):\n self.object.credibility_update(self.change[self.direction])\n self.object.save()\n\n def get_success_message(self):\n return _(\"Survey {object} selected!\").format(object=self.object)\n\n def get_success_url(self):\n return reverse('tasks:survey', kwargs={'pk': self.object.task_id})\n\n\nclass SurveyFillView(FormView):\n template_name = 'tasks/survey_fill.html'\n form_class = SurveyForm\n formset_class = AnswerFormSet\n\n @cached_property\n def task(self):\n return get_object_or_404(Task, pk=self.kwargs['pk'])\n\n @cached_property\n def object(self):\n try:\n return Survey.objects.filter(task=self.task).of_user(user=self.request.user,\n light_user=self.request.light_user).all()[0]\n except IndexError:\n return None\n\n def get_form_kwargs(self):\n kwargs = super(SurveyFillView, self).get_form_kwargs()\n kwargs['task'] = self.task\n kwargs['instance'] = self.object\n return kwargs\n\n def get_success_url(self):\n if 'save' in self.request.POST: # only save\n return self.object.task.get_absolute_url()\n\n # find next task\n try:\n next_task = self.task.get_next_for_user(self.request.user)\n return next_task.get_absolute_url()\n except Task.DoesNotExist:\n messages.success(self.request, EXHAUSTED_TEXT)\n return self.task.case.monitoring.get_absolute_url()\n\n @cached_property\n def formset(self):\n return self.formset_class(data=self.request.POST or None,\n survey=self.object,\n questionary=self.task.questionary)\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n if self.formset.is_valid():\n if self.request.user.is_authenticated():\n self.object.user = self.request.user\n else:\n self.object.light_user = self.request.light_user_new\n self.object.save()\n self.formset.save()\n return self.formset_valid(form, self.object, self.formset)\n return self.render_to_response(self.get_context_data())\n\n def formset_valid(self, form, obj, formset):\n messages.success(self.request, THANK_TEXT)\n obj.save()\n formset.save()\n return redirect(self.get_success_url())\n\n def get_context_data(self, **kwargs):\n context = super(SurveyFillView, 
self).get_context_data(**kwargs)\n context['formset'] = self.formset\n context['object'] = self.object\n context['task'] = self.task\n return context\n", "path": "feder/tasks/views.py" } ]
[ { "content": "from atom.views import (ActionMessageMixin, ActionView, CreateMessageMixin,\n DeleteMessageMixin, UpdateMessageMixin)\nfrom braces.views import (FormValidMessageMixin, PrefetchRelatedMixin,\n SelectRelatedMixin, UserFormKwargsMixin)\nfrom cached_property import cached_property\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (CreateView, DeleteView, DetailView, FormView,\n UpdateView)\nfrom django_filters.views import FilterView\n\nfrom feder.cases.models import Case\nfrom feder.main.mixins import (AttrPermissionRequiredMixin,\n RaisePermissionRequiredMixin)\nfrom .filters import TaskFilter\nfrom .forms import AnswerFormSet, SurveyForm, TaskForm\nfrom .models import Survey, Task\n\nDONE_MESSAGE_TEXT = _(\"Already done the job. If you want to change the answer - delete answers.\")\n\nTHANK_TEXT = _(\"Thank you for your submission. It is approaching us to know the \" +\n \"truth, by obtaining reliable data.\")\n\nEXHAUSTED_TEXT = _(\"Thank you for your help. Unfortunately, all the tasks \" +\n \"for you have been exhausted.\")\n\n\nclass TaskListView(SelectRelatedMixin, FilterView):\n filterset_class = TaskFilter\n model = Task\n select_related = ['case', 'questionary']\n paginate_by = 25\n\n def get_context_data(self, **kwargs):\n context = super(TaskListView, self).get_context_data(**kwargs)\n context['stats'] = self.object_list.survey_stats()\n return context\n\n\nclass TaskDetailView(SelectRelatedMixin, PrefetchRelatedMixin, DetailView):\n model = Task\n select_related = ['case__monitoring', 'case__institution', 'questionary']\n prefetch_related = ['survey_set', 'questionary__question_set']\n\n def get_user_survey(self):\n try:\n return (self.object.survey_set.with_full_answer().\n of_user(self.request.user, self.request.light_user).get())\n except Survey.DoesNotExist:\n return None\n\n def get_context_data(self, **kwargs):\n context = super(TaskDetailView, self).get_context_data(**kwargs)\n context['formset'] = AnswerFormSet(questionary=self.object.questionary)\n context['user_survey'] = self.get_user_survey()\n return context\n\n\nclass TaskSurveyView(SelectRelatedMixin, PrefetchRelatedMixin, DetailView):\n model = Task\n select_related = ['case__monitoring', 'case__institution', 'questionary', ]\n prefetch_related = ['questionary__question_set']\n template_name_suffix = '_survey'\n\n def get_context_data(self, **kwargs):\n context = super(TaskSurveyView, self).get_context_data(**kwargs)\n survey_list = (Survey.objects.for_task(self.object).with_user().with_full_answer().all())\n context['survey_list'] = survey_list\n user_survey_list = [x for x in survey_list if x.user == self.request.user] # TODO: Lazy\n context['user_survey'] = user_survey_list[0] if user_survey_list else None\n return context\n\n\nclass TaskCreateView(RaisePermissionRequiredMixin, UserFormKwargsMixin,\n CreateMessageMixin, CreateView):\n model = Task\n form_class = TaskForm\n permission_required = 'monitorings.add_task'\n\n @cached_property\n def case(self):\n return get_object_or_404(Case.objects.select_related('monitoring'),\n pk=self.kwargs['case'])\n\n def get_permission_object(self):\n return self.case.monitoring\n\n def get_form_kwargs(self):\n kw = super(TaskCreateView, self).get_form_kwargs()\n kw['case'] = self.case\n return kw\n\n def 
get_context_data(self, **kwargs):\n context = super(TaskCreateView, self).get_context_data(**kwargs)\n context['case'] = self.case\n return context\n\n\nclass TaskUpdateView(AttrPermissionRequiredMixin, UserFormKwargsMixin,\n UpdateMessageMixin, FormValidMessageMixin, UpdateView):\n model = Task\n form_class = TaskForm\n permission_required = 'change_task'\n permission_attribute = 'case__monitoring'\n\n\nclass TaskDeleteView(AttrPermissionRequiredMixin, DeleteMessageMixin, DeleteView):\n model = Task\n success_url = reverse_lazy('tasks:list')\n permission_required = 'delete_task'\n permission_attribute = 'case__monitoring'\n\n\nclass SurveyDeleteView(LoginRequiredMixin, DeleteMessageMixin, DeleteView):\n model = Survey\n slug_url_kwarg = 'task_id'\n slug_field = 'task_id'\n\n def get_queryset(self, *args, **kwargs):\n qs = super(SurveyDeleteView, self).get_queryset()\n return qs.of_user(self.request.user, self.request.light_user).with_full_answer()\n\n def get_success_url(self):\n return self.object.task.get_absolute_url()\n\n\nclass SurveySelectView(AttrPermissionRequiredMixin, ActionMessageMixin,\n SelectRelatedMixin, ActionView): # TODO: Write test\n model = Survey\n template_name_suffix = '_select'\n select_related = ['task__case__monitoring', ]\n permission_required = 'monitorings.select_survey'\n permission_attribute = 'task__case__monitoring'\n direction = None\n change = {'up': 1, 'down': -1}\n\n def action(self, *args, **kwargs):\n self.object.credibility_update(self.change[self.direction])\n self.object.save()\n\n def get_success_message(self):\n if self.direction == 'up':\n return _(\"Survey credibility increased!\")\n else:\n return _(\"Survey credibility decreased!\")\n\n def get_success_url(self):\n return reverse('tasks:survey', kwargs={'pk': self.object.task_id})\n\n\nclass SurveyFillView(FormView):\n template_name = 'tasks/survey_fill.html'\n form_class = SurveyForm\n formset_class = AnswerFormSet\n\n @cached_property\n def task(self):\n return get_object_or_404(Task, pk=self.kwargs['pk'])\n\n @cached_property\n def object(self):\n try:\n return Survey.objects.filter(task=self.task).of_user(user=self.request.user,\n light_user=self.request.light_user).all()[0]\n except IndexError:\n return None\n\n def get_form_kwargs(self):\n kwargs = super(SurveyFillView, self).get_form_kwargs()\n kwargs['task'] = self.task\n kwargs['instance'] = self.object\n return kwargs\n\n def get_success_url(self):\n if 'save' in self.request.POST: # only save\n return self.object.task.get_absolute_url()\n\n # find next task\n try:\n next_task = self.task.get_next_for_user(self.request.user)\n return next_task.get_absolute_url()\n except Task.DoesNotExist:\n messages.success(self.request, EXHAUSTED_TEXT)\n return self.task.case.monitoring.get_absolute_url()\n\n @cached_property\n def formset(self):\n return self.formset_class(data=self.request.POST or None,\n survey=self.object,\n questionary=self.task.questionary)\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n if self.formset.is_valid():\n if self.request.user.is_authenticated():\n self.object.user = self.request.user\n else:\n self.object.light_user = self.request.light_user_new\n self.object.save()\n self.formset.save()\n return self.formset_valid(form, self.object, self.formset)\n return self.render_to_response(self.get_context_data())\n\n def formset_valid(self, form, obj, formset):\n messages.success(self.request, THANK_TEXT)\n obj.save()\n formset.save()\n return redirect(self.get_success_url())\n\n def 
get_context_data(self, **kwargs):\n context = super(SurveyFillView, self).get_context_data(**kwargs)\n context['formset'] = self.formset\n context['object'] = self.object\n context['task'] = self.task\n return context\n", "path": "feder/tasks/views.py" } ]
diff --git a/feder/tasks/locale/pl/LC_MESSAGES/django.mo b/feder/tasks/locale/pl/LC_MESSAGES/django.mo
index 6ca3639b9..d24182059 100644
Binary files a/feder/tasks/locale/pl/LC_MESSAGES/django.mo and b/feder/tasks/locale/pl/LC_MESSAGES/django.mo differ
diff --git a/feder/tasks/locale/pl/LC_MESSAGES/django.po b/feder/tasks/locale/pl/LC_MESSAGES/django.po
index 1f42d33c9..b8a959c46 100644
--- a/feder/tasks/locale/pl/LC_MESSAGES/django.po
+++ b/feder/tasks/locale/pl/LC_MESSAGES/django.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: tasks 0.1.0\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2017-08-11 01:25+0000\n"
+"POT-Creation-Date: 2017-09-25 14:05+0000\n"
 "PO-Revision-Date: 2016-09-18 01:57+0200\n"
 "Last-Translator: Adam Dobrawy <[email protected]>\n"
 "Language-Team: Adam Dobrawy <[email protected]>\n"
@@ -19,7 +19,12 @@ msgstr ""
 "|| n%100>=20) ? 1 : 2);\n"
 "X-Generator: Poedit 1.8.7.1\n"

-#: filters.py:10 templates/tasks/task_survey.html:20
+#: apps.py:10 models.py:125 templates/tasks/base.html:5
+#: templates/tasks/task_detail.html:3
+msgid "Tasks"
+msgstr "Zadania"
+
+#: filters.py:10 templates/tasks/task_survey.html:24
 msgid "Creation date"
 msgstr "Data utworzenia"

@@ -27,11 +32,23 @@ msgstr "Data utworzenia"
 msgid "Is done?"
 msgstr "Czy ukończone?"

-#: filters.py:27
+#: filters.py:17 models.py:65
+msgid "Name"
+msgstr "Nazwa"
+
+#: filters.py:21
+msgid "Monitoring"
+msgstr "Monitoring"
+
+#: filters.py:24
+msgid "Institution"
+msgstr "Instytucja"
+
+#: filters.py:31
 msgid "creation date (ascending)"
 msgstr "Data utworzenia (rosnąco)"

-#: filters.py:28
+#: filters.py:32
 msgid "creation date (descending)"
 msgstr "Data utworzenia (malejąco)"

@@ -51,11 +68,11 @@ msgstr "Przedrostek dla nazwy w postaci \"[przedrostek] #[numer]\"."
 msgid "They are available only cases relevant to the monitoring."
 msgstr "Tu są dostępne sprawy wyłącznie właściwe dla danego monitoringu. "

-#: models.py:19 templates/tasks/task_filter.html:6
+#: models.py:21 templates/tasks/task_filter.html:6
 msgid "Tasks index"
 msgstr "Wykaz zadań"

-#: models.py:21
+#: models.py:23
 msgid ""
 "Define how much answers do you need to mark tasks as done\n"
 " or count progress"
@@ -63,58 +80,50 @@ msgstr ""
 "Określ jak wiele odpowiedzi, aby oznaczyć odpowiedź jako wykonaną lub do "
 "liczenia postępu."

-#: models.py:63
-msgid "Name"
-msgstr "Nazwa"
-
-#: models.py:64 templates/tasks/_task_small.html:8
-#: templates/tasks/task_detail.html:26
+#: models.py:66 templates/tasks/_task_small.html:8
+#: templates/tasks/task_detail.html:50
 msgid "Case"
 msgstr "Sprawa"

-#: models.py:65 templates/tasks/_task_small.html:9
-#: templates/tasks/task_detail.html:31
+#: models.py:67 templates/tasks/_task_small.html:9
+#: templates/tasks/task_detail.html:55
 msgid "Questionary"
 msgstr "Kwestionariusz"

-#: models.py:66
+#: models.py:68
 msgid "Questionary to fill by user as task"
 msgstr "Kwestionariusz do wypełnienia przez użytkownika jako zadanie"

-#: models.py:67
+#: models.py:69
 msgid "Required survey count"
 msgstr "Liczba wymaganych ankiet"

-#: models.py:70
+#: models.py:72
 msgid "Done survey count"
 msgstr "Liczba ukończonych ankiet"

-#: models.py:122
+#: models.py:124
 msgid "Task"
 msgstr "Zadanie"

-#: models.py:123 templates/tasks/base.html:5
-msgid "Tasks"
-msgstr "Zadania"
-
-#: models.py:142 templates/tasks/task_survey.html:24
+#: models.py:166 templates/tasks/task_survey.html:28
 msgid "Credibility"
 msgstr "Wiarygodność"

-#: models.py:152
+#: models.py:176
 msgid "Survey"
 msgstr "Ankieta"

-#: models.py:153
+#: models.py:177
 msgid "Surveys"
 msgstr "Ankiety"

-#: models.py:171
+#: models.py:195
 msgid "Answer"
 msgstr "Odpowiedź"

-#: models.py:172 templates/tasks/_btn.html:26
-#: templates/tasks/task_survey.html:6
+#: models.py:196 templates/tasks/_btn.html:26
+#: templates/tasks/task_survey.html:10
 msgid "Answers"
 msgstr "Odpowiedzi"

@@ -122,8 +131,8 @@ msgstr "Odpowiedzi"
 msgid "Edit"
 msgstr "Edytuj"

-#: templates/tasks/_btn.html:13 templates/tasks/survey_confirm_delete.html:16
-#: templates/tasks/task_detail.html:43
+#: templates/tasks/_btn.html:13 templates/tasks/survey_confirm_delete.html:22
+#: templates/tasks/task_detail.html:67
 msgid "Delete"
 msgstr "Usuń"

@@ -139,11 +148,11 @@ msgstr "Zaktualizuj odpowiedź"
 msgid "Delete answer"
 msgstr "Usuń odpowiedź"

-#: templates/tasks/_formset.html:11 templates/tasks/survey_fill.html:27
+#: templates/tasks/_formset.html:11 templates/tasks/survey_fill.html:57
 msgid "Save"
 msgstr "Zapisz"

-#: templates/tasks/_formset.html:12 templates/tasks/survey_fill.html:28
+#: templates/tasks/_formset.html:12 templates/tasks/survey_fill.html:58
 msgid "Save and go next"
 msgstr "Zapisz i przejdź do następnego"

@@ -156,48 +165,48 @@ msgid ""
 msgstr "%(progress)s%% ukończono"

 #: templates/tasks/_task_select_list.html:11
-#: templates/tasks/task_detail.html:58 templates/tasks/task_filter.html:24
+#: templates/tasks/task_detail.html:82 templates/tasks/task_filter.html:24
 msgid "No rows."
 msgstr "Brak wierszy"

-#: templates/tasks/survey_confirm_delete.html:5
-#: templates/tasks/task_confirm_delete.html:4
+#: templates/tasks/survey_confirm_delete.html:10
+#: templates/tasks/task_confirm_delete.html:9
 msgid "Confirm delete"
 msgstr "Potwierdź usuwanie"

-#: templates/tasks/survey_fill.html:9
+#: templates/tasks/survey_fill.html:38
 msgid "Survey fill"
 msgstr "Wypełnianie ankiety"

-#: templates/tasks/survey_select.html:5
+#: templates/tasks/survey_select.html:10
 msgid "Confirm select answer"
 msgstr "Potwierdź wybranie odpowiedzi"

-#: templates/tasks/survey_select.html:14 templates/tasks/task_survey.html:32
+#: templates/tasks/survey_select.html:20 templates/tasks/task_survey.html:36
 msgid "Credibility update"
 msgstr "Aktualizacja wiarygodności"

-#: templates/tasks/task_detail.html:38
+#: templates/tasks/task_detail.html:62
 msgid "Analysis"
 msgstr "Analiza"

-#: templates/tasks/task_form.html:6
+#: templates/tasks/task_form.html:10
 msgid "Update task"
 msgstr "Zaktualizuj zadanie"

-#: templates/tasks/task_form.html:6
+#: templates/tasks/task_form.html:10
 msgid "Add task"
 msgstr "Dodaj zadanie"

-#: templates/tasks/task_survey.html:17
+#: templates/tasks/task_survey.html:21
 msgid "User"
 msgstr "Użytkownik / użytkowniczka"

-#: templates/tasks/task_survey.html:68
+#: templates/tasks/task_survey.html:72
 msgid "Up"
 msgstr "Do góry"

-#: templates/tasks/task_survey.html:75
+#: templates/tasks/task_survey.html:79
 msgid "Down"
 msgstr "W dół"

@@ -264,10 +273,13 @@ msgstr ""
 "Dziękujemy za Twoją pomoc. Niestety, wszystkie zadania - w tym monitoringu - "
 "zostały wyczerpane. "

-#: views.py:145
-#, python-brace-format
-msgid "Survey {object} selected!"
-msgstr "Ankieta {object} wybrana!"
+#: views.py:146
+msgid "Survey credibility increased!"
+msgstr "Wiarygodność została zwiększona!"
+
+#: views.py:148
+msgid "Survey credibility decreased!"
+msgstr "Wiarygodność została zmniejszona!"

 #~ msgid "Create task"
 #~ msgstr "Utwórz zadanie"
diff --git a/feder/tasks/views.py b/feder/tasks/views.py
index 12da03727..34ec3f6fc 100644
--- a/feder/tasks/views.py
+++ b/feder/tasks/views.py
@@ -142,7 +142,10 @@ def action(self, *args, **kwargs):
         self.object.save()

     def get_success_message(self):
-        return _("Survey {object} selected!").format(object=self.object)
+        if self.direction == 'up':
+            return _("Survey credibility increased!")
+        else:
+            return _("Survey credibility decreased!")

     def get_success_url(self):
         return reverse('tasks:survey', kwargs={'pk': self.object.task_id})
A friendly message about the credibility update ![zaznaczenie_1027](https://user-images.githubusercontent.com/3618479/30726463-49ccc99c-9f4b-11e7-8b74-db3108ac4e45.png)
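For readers skimming this record: the patch above replaces the single generic "Survey {object} selected!" success message with one that depends on the vote direction. A minimal, self-contained sketch of that logic follows; it is illustrative only, with plain strings standing in for the ugettext_lazy-wrapped ones in the view, so it runs without a configured Django project:

def get_success_message(direction):
    # Mirrors the patched SurveySelectView.get_success_message:
    # 'up' reports an increase, anything else a decrease.
    if direction == 'up':
        return "Survey credibility increased!"
    return "Survey credibility decreased!"

assert get_success_message('up') == "Survey credibility increased!"
assert get_success_message('down') == "Survey credibility decreased!"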
pytorch__vision-810
[ { "content": "from __future__ import division\nimport torch\nimport sys\nimport math\nfrom PIL import Image, ImageOps, ImageEnhance, PILLOW_VERSION\ntry:\n import accimage\nexcept ImportError:\n accimage = None\nimport numpy as np\nimport numbers\nimport collections\nimport warnings\n\nif sys.version_info < (3, 3):\n Sequence = collections.Sequence\n Iterable = collections.Iterable\nelse:\n Sequence = collections.abc.Sequence\n Iterable = collections.abc.Iterable\n\n\ndef _is_pil_image(img):\n if accimage is not None:\n return isinstance(img, (Image.Image, accimage.Image))\n else:\n return isinstance(img, Image.Image)\n\n\ndef _is_tensor_image(img):\n return torch.is_tensor(img) and img.ndimension() == 3\n\n\ndef _is_numpy_image(img):\n return isinstance(img, np.ndarray) and (img.ndim in {2, 3})\n\n\ndef to_tensor(pic):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n See ``ToTensor`` for more details.\n\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n if not(_is_pil_image(pic) or _is_numpy_image(pic)):\n raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))\n\n if isinstance(pic, np.ndarray):\n # handle numpy array\n if pic.ndim == 2:\n pic = pic[:, :, None]\n\n img = torch.from_numpy(pic.transpose((2, 0, 1)))\n # backward compatibility\n if isinstance(img, torch.ByteTensor):\n return img.float().div(255)\n else:\n return img\n\n if accimage is not None and isinstance(pic, accimage.Image):\n nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)\n pic.copyto(nppic)\n return torch.from_numpy(nppic)\n\n # handle PIL Image\n if pic.mode == 'I':\n img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n elif pic.mode == 'I;16':\n img = torch.from_numpy(np.array(pic, np.int16, copy=False))\n elif pic.mode == 'F':\n img = torch.from_numpy(np.array(pic, np.float32, copy=False))\n elif pic.mode == '1':\n img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))\n else:\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n # PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK\n if pic.mode == 'YCbCr':\n nchannel = 3\n elif pic.mode == 'I;16':\n nchannel = 1\n else:\n nchannel = len(pic.mode)\n img = img.view(pic.size[1], pic.size[0], nchannel)\n # put it from HWC to CHW format\n # yikes, this transpose takes 80% of the loading time/CPU\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n if isinstance(img, torch.ByteTensor):\n return img.float().div(255)\n else:\n return img\n\n\ndef to_pil_image(pic, mode=None):\n \"\"\"Convert a tensor or an ndarray to PIL Image.\n\n See :class:`~torchvision.transforms.ToPILImage` for more details.\n\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n\n .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n\n Returns:\n PIL Image: Image converted to PIL Image.\n \"\"\"\n if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):\n raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))\n\n elif isinstance(pic, torch.Tensor):\n if pic.ndimension() not in {2, 3}:\n raise ValueError('pic should be 2/3 dimensional. 
Got {} dimensions.'.format(pic.ndimension()))\n\n elif pic.ndimension() == 2:\n # if 2D image, add channel dimension (CHW)\n pic = pic.unsqueeze(0)\n\n elif isinstance(pic, np.ndarray):\n if pic.ndim not in {2, 3}:\n raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))\n\n elif pic.ndim == 2:\n # if 2D image, add channel dimension (HWC)\n pic = np.expand_dims(pic, 2)\n\n npimg = pic\n if isinstance(pic, torch.FloatTensor):\n pic = pic.mul(255).byte()\n if isinstance(pic, torch.Tensor):\n npimg = np.transpose(pic.numpy(), (1, 2, 0))\n\n if not isinstance(npimg, np.ndarray):\n raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +\n 'not {}'.format(type(npimg)))\n\n if npimg.shape[2] == 1:\n expected_mode = None\n npimg = npimg[:, :, 0]\n if npimg.dtype == np.uint8:\n expected_mode = 'L'\n elif npimg.dtype == np.int16:\n expected_mode = 'I;16'\n elif npimg.dtype == np.int32:\n expected_mode = 'I'\n elif npimg.dtype == np.float32:\n expected_mode = 'F'\n if mode is not None and mode != expected_mode:\n raise ValueError(\"Incorrect mode ({}) supplied for input type {}. Should be {}\"\n .format(mode, np.dtype, expected_mode))\n mode = expected_mode\n\n elif npimg.shape[2] == 2:\n permitted_2_channel_modes = ['LA']\n if mode is not None and mode not in permitted_2_channel_modes:\n raise ValueError(\"Only modes {} are supported for 2D inputs\".format(permitted_2_channel_modes))\n\n if mode is None and npimg.dtype == np.uint8:\n mode = 'LA'\n\n elif npimg.shape[2] == 4:\n permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']\n if mode is not None and mode not in permitted_4_channel_modes:\n raise ValueError(\"Only modes {} are supported for 4D inputs\".format(permitted_4_channel_modes))\n\n if mode is None and npimg.dtype == np.uint8:\n mode = 'RGBA'\n else:\n permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']\n if mode is not None and mode not in permitted_3_channel_modes:\n raise ValueError(\"Only modes {} are supported for 3D inputs\".format(permitted_3_channel_modes))\n if mode is None and npimg.dtype == np.uint8:\n mode = 'RGB'\n\n if mode is None:\n raise TypeError('Input type {} is not supported'.format(npimg.dtype))\n\n return Image.fromarray(npimg, mode=mode)\n\n\ndef normalize(tensor, mean, std, inplace=False):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n\n .. note::\n This transform acts out of place by default, i.e., it does not mutates the input tensor.\n\n See :class:`~torchvision.transforms.Normalize` for more details.\n\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channely.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n if not _is_tensor_image(tensor):\n raise TypeError('tensor is not a torch image.')\n\n if not inplace:\n tensor = tensor.clone()\n\n mean = torch.tensor(mean, dtype=torch.float32, device=tensor.device)\n std = torch.tensor(std, dtype=torch.float32, device=tensor.device)\n tensor.sub_(mean[:, None, None]).div_(std[:, None, None])\n return tensor\n\n\ndef resize(img, size, interpolation=Image.BILINEAR):\n r\"\"\"Resize the input PIL Image to the given size.\n\n Args:\n img (PIL Image): Image to be resized.\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), the output size will be matched to this. If size is an int,\n the smaller edge of the image will be matched to this number maintaing\n the aspect ratio. 
i.e, if height > width, then image will be rescaled to\n :math:`\\left(\\text{size} \\times \\frac{\\text{height}}{\\text{width}}, \\text{size}\\right)`\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n\n Returns:\n PIL Image: Resized image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):\n raise TypeError('Got inappropriate size arg: {}'.format(size))\n\n if isinstance(size, int):\n w, h = img.size\n if (w <= h and w == size) or (h <= w and h == size):\n return img\n if w < h:\n ow = size\n oh = int(size * h / w)\n return img.resize((ow, oh), interpolation)\n else:\n oh = size\n ow = int(size * w / h)\n return img.resize((ow, oh), interpolation)\n else:\n return img.resize(size[::-1], interpolation)\n\n\ndef scale(*args, **kwargs):\n warnings.warn(\"The use of the transforms.Scale transform is deprecated, \" +\n \"please use transforms.Resize instead.\")\n return resize(*args, **kwargs)\n\n\ndef pad(img, padding, fill=0, padding_mode='constant'):\n r\"\"\"Pad the given PIL Image on all sides with specified padding mode and fill value.\n\n Args:\n img (PIL Image): Image to be padded.\n padding (int or tuple): Padding on each border. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively.\n fill: Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n Returns:\n PIL Image: Padded image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n if not isinstance(padding, (numbers.Number, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode arg')\n\n if isinstance(padding, Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Padding mode should be either constant, edge, reflect or symmetric'\n\n if padding_mode == 'constant':\n if img.mode == 'P':\n palette = img.getpalette()\n image = ImageOps.expand(img, border=padding, fill=fill)\n image.putpalette(palette)\n return image\n\n return ImageOps.expand(img, border=padding, fill=fill)\n else:\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, Sequence) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n if isinstance(padding, Sequence) and len(padding) == 4:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n if img.mode == 'P':\n palette = img.getpalette()\n img = np.asarray(img)\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)\n img = Image.fromarray(img)\n img.putpalette(palette)\n return img\n\n img = np.asarray(img)\n # RGB image\n if len(img.shape) == 3:\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)\n # Grayscale image\n if len(img.shape) == 2:\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)\n\n return Image.fromarray(img)\n\n\ndef crop(img, i, j, h, w):\n \"\"\"Crop the given PIL Image.\n\n Args:\n img (PIL Image): Image to be cropped.\n i: Upper pixel coordinate.\n j: Left pixel coordinate.\n h: Height of the cropped image.\n w: Width of the cropped image.\n\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n return img.crop((j, i, j + w, i + h))\n\n\ndef center_crop(img, output_size):\n if isinstance(output_size, numbers.Number):\n output_size = (int(output_size), int(output_size))\n w, h = img.size\n th, tw = output_size\n i = int(round((h - th) / 2.))\n j = int(round((w - tw) / 2.))\n return crop(img, i, j, th, tw)\n\n\ndef resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):\n \"\"\"Crop the given PIL Image and resize it to desired size.\n\n Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.\n\n Args:\n img (PIL Image): Image to be cropped.\n i: i in (i,j) i.e coordinates of the upper left corner\n j: j in (i,j) i.e coordinates of the upper left corner\n h: Height of the cropped image.\n w: Width of the cropped image.\n size (sequence or int): Desired output size. Same semantics as ``resize``.\n interpolation (int, optional): Desired interpolation. 
Default is\n ``PIL.Image.BILINEAR``.\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n assert _is_pil_image(img), 'img should be PIL Image'\n img = crop(img, i, j, h, w)\n img = resize(img, size, interpolation)\n return img\n\n\ndef hflip(img):\n \"\"\"Horizontally flip the given PIL Image.\n\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Horizontall flipped image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n\n\ndef vflip(img):\n \"\"\"Vertically flip the given PIL Image.\n\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Vertically flipped image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n return img.transpose(Image.FLIP_TOP_BOTTOM)\n\n\ndef five_crop(img, size):\n \"\"\"Crop the given PIL Image into four corners and the central crop.\n\n .. Note::\n This transform returns a tuple of images and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n\n Returns:\n tuple: tuple (tl, tr, bl, br, center)\n Corresponding top left, top right, bottom left, bottom right and center crop.\n \"\"\"\n if isinstance(size, numbers.Number):\n size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n\n w, h = img.size\n crop_h, crop_w = size\n if crop_w > w or crop_h > h:\n raise ValueError(\"Requested crop size {} is bigger than input size {}\".format(size,\n (h, w)))\n tl = img.crop((0, 0, crop_w, crop_h))\n tr = img.crop((w - crop_w, 0, w, crop_h))\n bl = img.crop((0, h - crop_h, crop_w, h))\n br = img.crop((w - crop_w, h - crop_h, w, h))\n center = center_crop(img, (crop_h, crop_w))\n return (tl, tr, bl, br, center)\n\n\ndef ten_crop(img, size, vertical_flip=False):\n r\"\"\"Crop the given PIL Image into four corners and the central crop plus the\n flipped version of these (horizontal flipping is used by default).\n\n .. Note::\n This transform returns a tuple of images and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n vertical_flip (bool): Use vertical flipping instead of horizontal\n\n Returns:\n tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)\n Corresponding top left, top right, bottom left, bottom right and center crop\n and same for the flipped image.\n \"\"\"\n if isinstance(size, numbers.Number):\n size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n\n first_five = five_crop(img, size)\n\n if vertical_flip:\n img = vflip(img)\n else:\n img = hflip(img)\n\n second_five = five_crop(img, size)\n return first_five + second_five\n\n\ndef adjust_brightness(img, brightness_factor):\n \"\"\"Adjust brightness of an Image.\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n brightness_factor (float): How much to adjust the brightness. Can be\n any non negative number. 
0 gives a black image, 1 gives the\n original image while 2 increases the brightness by a factor of 2.\n\n Returns:\n PIL Image: Brightness adjusted image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n enhancer = ImageEnhance.Brightness(img)\n img = enhancer.enhance(brightness_factor)\n return img\n\n\ndef adjust_contrast(img, contrast_factor):\n \"\"\"Adjust contrast of an Image.\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n contrast_factor (float): How much to adjust the contrast. Can be any\n non negative number. 0 gives a solid gray image, 1 gives the\n original image while 2 increases the contrast by a factor of 2.\n\n Returns:\n PIL Image: Contrast adjusted image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n enhancer = ImageEnhance.Contrast(img)\n img = enhancer.enhance(contrast_factor)\n return img\n\n\ndef adjust_saturation(img, saturation_factor):\n \"\"\"Adjust color saturation of an image.\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n saturation_factor (float): How much to adjust the saturation. 0 will\n give a black and white image, 1 will give the original image while\n 2 will enhance the saturation by a factor of 2.\n\n Returns:\n PIL Image: Saturation adjusted image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img\n\n\ndef adjust_hue(img, hue_factor):\n \"\"\"Adjust hue of an image.\n\n The image hue is adjusted by converting the image to HSV and\n cyclically shifting the intensities in the hue channel (H).\n The image is then converted back to original image mode.\n\n `hue_factor` is the amount of shift in H channel and must be in the\n interval `[-0.5, 0.5]`.\n\n See `Hue`_ for more details.\n\n .. _Hue: https://en.wikipedia.org/wiki/Hue\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n hue_factor (float): How much to shift the hue channel. Should be in\n [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in\n HSV space in positive and negative direction respectively.\n 0 means no shift. Therefore, both -0.5 and 0.5 will give an image\n with complementary colors while 0 gives the original image.\n\n Returns:\n PIL Image: Hue adjusted image.\n \"\"\"\n if not(-0.5 <= hue_factor <= 0.5):\n raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor))\n\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n input_mode = img.mode\n if input_mode in {'L', '1', 'I', 'F'}:\n return img\n\n h, s, v = img.convert('HSV').split()\n\n np_h = np.array(h, dtype=np.uint8)\n # uint8 addition take cares of rotation across boundaries\n with np.errstate(over='ignore'):\n np_h += np.uint8(hue_factor * 255)\n h = Image.fromarray(np_h, 'L')\n\n img = Image.merge('HSV', (h, s, v)).convert(input_mode)\n return img\n\n\ndef adjust_gamma(img, gamma, gain=1):\n r\"\"\"Perform gamma correction on an image.\n\n Also known as Power Law Transform. Intensities in RGB mode are adjusted\n based on the following equation:\n\n .. math::\n I_{\\text{out}} = 255 \\times \\text{gain} \\times \\left(\\frac{I_{\\text{in}}}{255}\\right)^{\\gamma}\n\n See `Gamma Correction`_ for more details.\n\n .. 
_Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n gamma (float): Non negative real number, same as :math:`\\gamma` in the equation.\n gamma larger than 1 make the shadows darker,\n while gamma smaller than 1 make dark regions lighter.\n gain (float): The constant multiplier.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n if gamma < 0:\n raise ValueError('Gamma should be a non-negative real number')\n\n input_mode = img.mode\n img = img.convert('RGB')\n\n gamma_map = [255 * gain * pow(ele / 255., gamma) for ele in range(256)] * 3\n img = img.point(gamma_map) # use PIL's point-function to accelerate this part\n\n img = img.convert(input_mode)\n return img\n\n\ndef rotate(img, angle, resample=False, expand=False, center=None):\n \"\"\"Rotate the image by angle.\n\n\n Args:\n img (PIL Image): PIL Image to be rotated.\n angle (float or int): In degrees degrees counter clockwise order.\n resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to ``PIL.Image.NEAREST``.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output image to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple, optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n return img.rotate(angle, resample, expand, center)\n\n\ndef _get_inverse_affine_matrix(center, angle, translate, scale, shear):\n # Helper method to compute inverse matrix for affine transformation\n\n # As it is explained in PIL.Image.rotate\n # We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1\n # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]\n # C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]\n # RSS is rotation with scale and shear matrix\n # RSS(a, scale, shear) = [ cos(a)*scale -sin(a + shear)*scale 0]\n # [ sin(a)*scale cos(a + shear)*scale 0]\n # [ 0 0 1]\n # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1\n\n angle = math.radians(angle)\n shear = math.radians(shear)\n scale = 1.0 / scale\n\n # Inverted rotation matrix with scale and shear\n d = math.cos(angle + shear) * math.cos(angle) + math.sin(angle + shear) * math.sin(angle)\n matrix = [\n math.cos(angle + shear), math.sin(angle + shear), 0,\n -math.sin(angle), math.cos(angle), 0\n ]\n matrix = [scale / d * m for m in matrix]\n\n # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1\n matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (-center[1] - translate[1])\n matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (-center[1] - translate[1])\n\n # Apply center translation: C * RSS^-1 * C^-1 * T^-1\n matrix[2] += center[0]\n matrix[5] += center[1]\n return matrix\n\n\ndef affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):\n \"\"\"Apply affine transformation on the image keeping image center invariant\n\n Args:\n img (PIL Image): PIL Image to be rotated.\n angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.\n translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)\n scale (float): overall scale\n shear (float): shear angle value in degrees between -180 to 180, clockwise direction.\n resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):\n An optional resampling filter.\n See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to ``PIL.Image.NEAREST``.\n fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n assert isinstance(translate, (tuple, list)) and len(translate) == 2, \\\n \"Argument translate should be a list or tuple of length 2\"\n\n assert scale > 0.0, \"Argument scale should be positive\"\n\n output_size = img.size\n center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5)\n matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)\n kwargs = {\"fillcolor\": fillcolor} if PILLOW_VERSION[0] == '5' else {}\n return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs)\n\n\ndef to_grayscale(img, num_output_channels=1):\n \"\"\"Convert image to grayscale version of image.\n\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Grayscale version of the image.\n if num_output_channels = 1 : returned image is single channel\n\n if num_output_channels = 3 : returned image is 3 channel with r = g = b\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n if num_output_channels == 1:\n img = img.convert('L')\n elif num_output_channels == 3:\n img = img.convert('L')\n np_img = np.array(img, dtype=np.uint8)\n np_img = np.dstack([np_img, np_img, np_img])\n img = Image.fromarray(np_img, 'RGB')\n else:\n raise ValueError('num_output_channels should be either 1 or 3')\n\n return img\n", "path": "torchvision/transforms/functional.py" } ]
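Comparing this before_files snapshot with the after_files snapshot below, the change sits in normalize(): torch.tensor(mean, ...) becomes torch.as_tensor(mean, ...). A minimal sketch of why that matters, based on documented PyTorch behavior rather than code from this record: torch.tensor always copies its input (and warns when handed an existing tensor), while torch.as_tensor reuses the input's storage when the dtype and device already match.

import torch

# Hypothetical demonstration, not part of the repository:
mean = torch.tensor([0.485, 0.456, 0.406])

copied = torch.tensor(mean)     # always copies; PyTorch warns on tensor input
shared = torch.as_tensor(mean)  # matching dtype/device -> no copy, same storage

assert shared.data_ptr() == mean.data_ptr()
assert copied.data_ptr() != mean.data_ptr()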
[ { "content": "from __future__ import division\nimport torch\nimport sys\nimport math\nfrom PIL import Image, ImageOps, ImageEnhance, PILLOW_VERSION\ntry:\n import accimage\nexcept ImportError:\n accimage = None\nimport numpy as np\nimport numbers\nimport collections\nimport warnings\n\nif sys.version_info < (3, 3):\n Sequence = collections.Sequence\n Iterable = collections.Iterable\nelse:\n Sequence = collections.abc.Sequence\n Iterable = collections.abc.Iterable\n\n\ndef _is_pil_image(img):\n if accimage is not None:\n return isinstance(img, (Image.Image, accimage.Image))\n else:\n return isinstance(img, Image.Image)\n\n\ndef _is_tensor_image(img):\n return torch.is_tensor(img) and img.ndimension() == 3\n\n\ndef _is_numpy_image(img):\n return isinstance(img, np.ndarray) and (img.ndim in {2, 3})\n\n\ndef to_tensor(pic):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n See ``ToTensor`` for more details.\n\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n if not(_is_pil_image(pic) or _is_numpy_image(pic)):\n raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))\n\n if isinstance(pic, np.ndarray):\n # handle numpy array\n if pic.ndim == 2:\n pic = pic[:, :, None]\n\n img = torch.from_numpy(pic.transpose((2, 0, 1)))\n # backward compatibility\n if isinstance(img, torch.ByteTensor):\n return img.float().div(255)\n else:\n return img\n\n if accimage is not None and isinstance(pic, accimage.Image):\n nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)\n pic.copyto(nppic)\n return torch.from_numpy(nppic)\n\n # handle PIL Image\n if pic.mode == 'I':\n img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n elif pic.mode == 'I;16':\n img = torch.from_numpy(np.array(pic, np.int16, copy=False))\n elif pic.mode == 'F':\n img = torch.from_numpy(np.array(pic, np.float32, copy=False))\n elif pic.mode == '1':\n img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))\n else:\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n # PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK\n if pic.mode == 'YCbCr':\n nchannel = 3\n elif pic.mode == 'I;16':\n nchannel = 1\n else:\n nchannel = len(pic.mode)\n img = img.view(pic.size[1], pic.size[0], nchannel)\n # put it from HWC to CHW format\n # yikes, this transpose takes 80% of the loading time/CPU\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n if isinstance(img, torch.ByteTensor):\n return img.float().div(255)\n else:\n return img\n\n\ndef to_pil_image(pic, mode=None):\n \"\"\"Convert a tensor or an ndarray to PIL Image.\n\n See :class:`~torchvision.transforms.ToPILImage` for more details.\n\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n\n .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n\n Returns:\n PIL Image: Image converted to PIL Image.\n \"\"\"\n if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):\n raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))\n\n elif isinstance(pic, torch.Tensor):\n if pic.ndimension() not in {2, 3}:\n raise ValueError('pic should be 2/3 dimensional. 
Got {} dimensions.'.format(pic.ndimension()))\n\n elif pic.ndimension() == 2:\n # if 2D image, add channel dimension (CHW)\n pic = pic.unsqueeze(0)\n\n elif isinstance(pic, np.ndarray):\n if pic.ndim not in {2, 3}:\n raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))\n\n elif pic.ndim == 2:\n # if 2D image, add channel dimension (HWC)\n pic = np.expand_dims(pic, 2)\n\n npimg = pic\n if isinstance(pic, torch.FloatTensor):\n pic = pic.mul(255).byte()\n if isinstance(pic, torch.Tensor):\n npimg = np.transpose(pic.numpy(), (1, 2, 0))\n\n if not isinstance(npimg, np.ndarray):\n raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +\n 'not {}'.format(type(npimg)))\n\n if npimg.shape[2] == 1:\n expected_mode = None\n npimg = npimg[:, :, 0]\n if npimg.dtype == np.uint8:\n expected_mode = 'L'\n elif npimg.dtype == np.int16:\n expected_mode = 'I;16'\n elif npimg.dtype == np.int32:\n expected_mode = 'I'\n elif npimg.dtype == np.float32:\n expected_mode = 'F'\n if mode is not None and mode != expected_mode:\n raise ValueError(\"Incorrect mode ({}) supplied for input type {}. Should be {}\"\n .format(mode, np.dtype, expected_mode))\n mode = expected_mode\n\n elif npimg.shape[2] == 2:\n permitted_2_channel_modes = ['LA']\n if mode is not None and mode not in permitted_2_channel_modes:\n raise ValueError(\"Only modes {} are supported for 2D inputs\".format(permitted_2_channel_modes))\n\n if mode is None and npimg.dtype == np.uint8:\n mode = 'LA'\n\n elif npimg.shape[2] == 4:\n permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']\n if mode is not None and mode not in permitted_4_channel_modes:\n raise ValueError(\"Only modes {} are supported for 4D inputs\".format(permitted_4_channel_modes))\n\n if mode is None and npimg.dtype == np.uint8:\n mode = 'RGBA'\n else:\n permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']\n if mode is not None and mode not in permitted_3_channel_modes:\n raise ValueError(\"Only modes {} are supported for 3D inputs\".format(permitted_3_channel_modes))\n if mode is None and npimg.dtype == np.uint8:\n mode = 'RGB'\n\n if mode is None:\n raise TypeError('Input type {} is not supported'.format(npimg.dtype))\n\n return Image.fromarray(npimg, mode=mode)\n\n\ndef normalize(tensor, mean, std, inplace=False):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n\n .. note::\n This transform acts out of place by default, i.e., it does not mutates the input tensor.\n\n See :class:`~torchvision.transforms.Normalize` for more details.\n\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channely.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n if not _is_tensor_image(tensor):\n raise TypeError('tensor is not a torch image.')\n\n if not inplace:\n tensor = tensor.clone()\n\n mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)\n std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)\n tensor.sub_(mean[:, None, None]).div_(std[:, None, None])\n return tensor\n\n\ndef resize(img, size, interpolation=Image.BILINEAR):\n r\"\"\"Resize the input PIL Image to the given size.\n\n Args:\n img (PIL Image): Image to be resized.\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), the output size will be matched to this. 
If size is an int,\n the smaller edge of the image will be matched to this number maintaing\n the aspect ratio. i.e, if height > width, then image will be rescaled to\n :math:`\\left(\\text{size} \\times \\frac{\\text{height}}{\\text{width}}, \\text{size}\\right)`\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n\n Returns:\n PIL Image: Resized image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):\n raise TypeError('Got inappropriate size arg: {}'.format(size))\n\n if isinstance(size, int):\n w, h = img.size\n if (w <= h and w == size) or (h <= w and h == size):\n return img\n if w < h:\n ow = size\n oh = int(size * h / w)\n return img.resize((ow, oh), interpolation)\n else:\n oh = size\n ow = int(size * w / h)\n return img.resize((ow, oh), interpolation)\n else:\n return img.resize(size[::-1], interpolation)\n\n\ndef scale(*args, **kwargs):\n warnings.warn(\"The use of the transforms.Scale transform is deprecated, \" +\n \"please use transforms.Resize instead.\")\n return resize(*args, **kwargs)\n\n\ndef pad(img, padding, fill=0, padding_mode='constant'):\n r\"\"\"Pad the given PIL Image on all sides with specified padding mode and fill value.\n\n Args:\n img (PIL Image): Image to be padded.\n padding (int or tuple): Padding on each border. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively.\n fill: Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n Returns:\n PIL Image: Padded image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n if not isinstance(padding, (numbers.Number, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode arg')\n\n if isinstance(padding, Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Padding mode should be either constant, edge, reflect or symmetric'\n\n if padding_mode == 'constant':\n if img.mode == 'P':\n palette = img.getpalette()\n image = ImageOps.expand(img, border=padding, fill=fill)\n image.putpalette(palette)\n return image\n\n return ImageOps.expand(img, border=padding, fill=fill)\n else:\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, Sequence) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n if isinstance(padding, Sequence) and len(padding) == 4:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n if img.mode == 'P':\n palette = img.getpalette()\n img = np.asarray(img)\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)\n img = Image.fromarray(img)\n img.putpalette(palette)\n return img\n\n img = np.asarray(img)\n # RGB image\n if len(img.shape) == 3:\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)\n # Grayscale image\n if len(img.shape) == 2:\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)\n\n return Image.fromarray(img)\n\n\ndef crop(img, i, j, h, w):\n \"\"\"Crop the given PIL Image.\n\n Args:\n img (PIL Image): Image to be cropped.\n i: Upper pixel coordinate.\n j: Left pixel coordinate.\n h: Height of the cropped image.\n w: Width of the cropped image.\n\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n return img.crop((j, i, j + w, i + h))\n\n\ndef center_crop(img, output_size):\n if isinstance(output_size, numbers.Number):\n output_size = (int(output_size), int(output_size))\n w, h = img.size\n th, tw = output_size\n i = int(round((h - th) / 2.))\n j = int(round((w - tw) / 2.))\n return crop(img, i, j, th, tw)\n\n\ndef resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):\n \"\"\"Crop the given PIL Image and resize it to desired size.\n\n Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.\n\n Args:\n img (PIL Image): Image to be cropped.\n i: i in (i,j) i.e coordinates of the upper left corner\n j: j in (i,j) i.e coordinates of the upper left corner\n h: Height of the cropped image.\n w: Width of the cropped image.\n size (sequence or int): Desired output size. Same semantics as ``resize``.\n interpolation (int, optional): Desired interpolation. 
Default is\n ``PIL.Image.BILINEAR``.\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n assert _is_pil_image(img), 'img should be PIL Image'\n img = crop(img, i, j, h, w)\n img = resize(img, size, interpolation)\n return img\n\n\ndef hflip(img):\n \"\"\"Horizontally flip the given PIL Image.\n\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Horizontall flipped image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n\n\ndef vflip(img):\n \"\"\"Vertically flip the given PIL Image.\n\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Vertically flipped image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n return img.transpose(Image.FLIP_TOP_BOTTOM)\n\n\ndef five_crop(img, size):\n \"\"\"Crop the given PIL Image into four corners and the central crop.\n\n .. Note::\n This transform returns a tuple of images and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n\n Returns:\n tuple: tuple (tl, tr, bl, br, center)\n Corresponding top left, top right, bottom left, bottom right and center crop.\n \"\"\"\n if isinstance(size, numbers.Number):\n size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n\n w, h = img.size\n crop_h, crop_w = size\n if crop_w > w or crop_h > h:\n raise ValueError(\"Requested crop size {} is bigger than input size {}\".format(size,\n (h, w)))\n tl = img.crop((0, 0, crop_w, crop_h))\n tr = img.crop((w - crop_w, 0, w, crop_h))\n bl = img.crop((0, h - crop_h, crop_w, h))\n br = img.crop((w - crop_w, h - crop_h, w, h))\n center = center_crop(img, (crop_h, crop_w))\n return (tl, tr, bl, br, center)\n\n\ndef ten_crop(img, size, vertical_flip=False):\n r\"\"\"Crop the given PIL Image into four corners and the central crop plus the\n flipped version of these (horizontal flipping is used by default).\n\n .. Note::\n This transform returns a tuple of images and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n vertical_flip (bool): Use vertical flipping instead of horizontal\n\n Returns:\n tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)\n Corresponding top left, top right, bottom left, bottom right and center crop\n and same for the flipped image.\n \"\"\"\n if isinstance(size, numbers.Number):\n size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n\n first_five = five_crop(img, size)\n\n if vertical_flip:\n img = vflip(img)\n else:\n img = hflip(img)\n\n second_five = five_crop(img, size)\n return first_five + second_five\n\n\ndef adjust_brightness(img, brightness_factor):\n \"\"\"Adjust brightness of an Image.\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n brightness_factor (float): How much to adjust the brightness. Can be\n any non negative number. 
0 gives a black image, 1 gives the\n original image while 2 increases the brightness by a factor of 2.\n\n Returns:\n PIL Image: Brightness adjusted image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n enhancer = ImageEnhance.Brightness(img)\n img = enhancer.enhance(brightness_factor)\n return img\n\n\ndef adjust_contrast(img, contrast_factor):\n \"\"\"Adjust contrast of an Image.\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n contrast_factor (float): How much to adjust the contrast. Can be any\n non negative number. 0 gives a solid gray image, 1 gives the\n original image while 2 increases the contrast by a factor of 2.\n\n Returns:\n PIL Image: Contrast adjusted image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n enhancer = ImageEnhance.Contrast(img)\n img = enhancer.enhance(contrast_factor)\n return img\n\n\ndef adjust_saturation(img, saturation_factor):\n \"\"\"Adjust color saturation of an image.\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n saturation_factor (float): How much to adjust the saturation. 0 will\n give a black and white image, 1 will give the original image while\n 2 will enhance the saturation by a factor of 2.\n\n Returns:\n PIL Image: Saturation adjusted image.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img\n\n\ndef adjust_hue(img, hue_factor):\n \"\"\"Adjust hue of an image.\n\n The image hue is adjusted by converting the image to HSV and\n cyclically shifting the intensities in the hue channel (H).\n The image is then converted back to original image mode.\n\n `hue_factor` is the amount of shift in H channel and must be in the\n interval `[-0.5, 0.5]`.\n\n See `Hue`_ for more details.\n\n .. _Hue: https://en.wikipedia.org/wiki/Hue\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n hue_factor (float): How much to shift the hue channel. Should be in\n [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in\n HSV space in positive and negative direction respectively.\n 0 means no shift. Therefore, both -0.5 and 0.5 will give an image\n with complementary colors while 0 gives the original image.\n\n Returns:\n PIL Image: Hue adjusted image.\n \"\"\"\n if not(-0.5 <= hue_factor <= 0.5):\n raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor))\n\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n input_mode = img.mode\n if input_mode in {'L', '1', 'I', 'F'}:\n return img\n\n h, s, v = img.convert('HSV').split()\n\n np_h = np.array(h, dtype=np.uint8)\n # uint8 addition take cares of rotation across boundaries\n with np.errstate(over='ignore'):\n np_h += np.uint8(hue_factor * 255)\n h = Image.fromarray(np_h, 'L')\n\n img = Image.merge('HSV', (h, s, v)).convert(input_mode)\n return img\n\n\ndef adjust_gamma(img, gamma, gain=1):\n r\"\"\"Perform gamma correction on an image.\n\n Also known as Power Law Transform. Intensities in RGB mode are adjusted\n based on the following equation:\n\n .. math::\n I_{\\text{out}} = 255 \\times \\text{gain} \\times \\left(\\frac{I_{\\text{in}}}{255}\\right)^{\\gamma}\n\n See `Gamma Correction`_ for more details.\n\n .. 
_Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n gamma (float): Non negative real number, same as :math:`\\gamma` in the equation.\n gamma larger than 1 make the shadows darker,\n while gamma smaller than 1 make dark regions lighter.\n gain (float): The constant multiplier.\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n if gamma < 0:\n raise ValueError('Gamma should be a non-negative real number')\n\n input_mode = img.mode\n img = img.convert('RGB')\n\n gamma_map = [255 * gain * pow(ele / 255., gamma) for ele in range(256)] * 3\n img = img.point(gamma_map) # use PIL's point-function to accelerate this part\n\n img = img.convert(input_mode)\n return img\n\n\ndef rotate(img, angle, resample=False, expand=False, center=None):\n \"\"\"Rotate the image by angle.\n\n\n Args:\n img (PIL Image): PIL Image to be rotated.\n angle (float or int): In degrees degrees counter clockwise order.\n resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to ``PIL.Image.NEAREST``.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output image to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple, optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n return img.rotate(angle, resample, expand, center)\n\n\ndef _get_inverse_affine_matrix(center, angle, translate, scale, shear):\n # Helper method to compute inverse matrix for affine transformation\n\n # As it is explained in PIL.Image.rotate\n # We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1\n # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]\n # C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]\n # RSS is rotation with scale and shear matrix\n # RSS(a, scale, shear) = [ cos(a)*scale -sin(a + shear)*scale 0]\n # [ sin(a)*scale cos(a + shear)*scale 0]\n # [ 0 0 1]\n # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1\n\n angle = math.radians(angle)\n shear = math.radians(shear)\n scale = 1.0 / scale\n\n # Inverted rotation matrix with scale and shear\n d = math.cos(angle + shear) * math.cos(angle) + math.sin(angle + shear) * math.sin(angle)\n matrix = [\n math.cos(angle + shear), math.sin(angle + shear), 0,\n -math.sin(angle), math.cos(angle), 0\n ]\n matrix = [scale / d * m for m in matrix]\n\n # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1\n matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (-center[1] - translate[1])\n matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (-center[1] - translate[1])\n\n # Apply center translation: C * RSS^-1 * C^-1 * T^-1\n matrix[2] += center[0]\n matrix[5] += center[1]\n return matrix\n\n\ndef affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):\n \"\"\"Apply affine transformation on the image keeping image center invariant\n\n Args:\n img (PIL Image): PIL Image to be rotated.\n angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.\n translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)\n scale (float): overall scale\n shear (float): shear angle value in degrees between -180 to 180, clockwise direction.\n resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):\n An optional resampling filter.\n See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to ``PIL.Image.NEAREST``.\n fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n assert isinstance(translate, (tuple, list)) and len(translate) == 2, \\\n \"Argument translate should be a list or tuple of length 2\"\n\n assert scale > 0.0, \"Argument scale should be positive\"\n\n output_size = img.size\n center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5)\n matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)\n kwargs = {\"fillcolor\": fillcolor} if PILLOW_VERSION[0] == '5' else {}\n return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs)\n\n\ndef to_grayscale(img, num_output_channels=1):\n \"\"\"Convert image to grayscale version of image.\n\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Grayscale version of the image.\n if num_output_channels = 1 : returned image is single channel\n\n if num_output_channels = 3 : returned image is 3 channel with r = g = b\n \"\"\"\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n if num_output_channels == 1:\n img = img.convert('L')\n elif num_output_channels == 3:\n img = img.convert('L')\n np_img = np.array(img, dtype=np.uint8)\n np_img = np.dstack([np_img, np_img, np_img])\n img = Image.fromarray(np_img, 'RGB')\n else:\n raise ValueError('num_output_channels should be either 1 or 3')\n\n return img\n", "path": "torchvision/transforms/functional.py" } ]
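One detail in `adjust_hue` above is easy to miss: the cyclic hue shift is implemented with deliberate `uint8` overflow. A minimal sketch of that wraparound, using illustrative channel values that are not taken from the file:

```python
import numpy as np

# Three hue-channel samples, one close to the top of the uint8 range.
h = np.array([10, 128, 250], dtype=np.uint8)

# hue_factor = 0.1 maps to a shift of int(0.1 * 255) = 25 steps.
# uint8 addition wraps modulo 256, which is exactly the cyclic behaviour
# a hue rotation needs; the errstate block mirrors the
# np.errstate(over='ignore') guard in adjust_hue.
with np.errstate(over='ignore'):
    h += np.uint8(0.1 * 255)

print(h)  # [ 35 153  19]: 250 + 25 wrapped around to 19
```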
diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py
index 71c0ff87bf5..14e3d794283 100644
--- a/torchvision/transforms/functional.py
+++ b/torchvision/transforms/functional.py
@@ -203,8 +203,8 @@ def normalize(tensor, mean, std, inplace=False):
 
     if not inplace:
         tensor = tensor.clone()
 
-    mean = torch.tensor(mean, dtype=torch.float32, device=tensor.device)
-    std = torch.tensor(std, dtype=torch.float32, device=tensor.device)
+    mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)
+    std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)
     tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
     return tensor
`UserWarning` when applying `Normalize`

Hi!

pytorch's version: 1.0.1.post2
torchvision's version: 0.2.2.post3

Sorry for the formatting, but you need to scroll the snippet to the right in order to see the warning.

```python
>>> from torch import tensor
>>> from torchvision.transforms import Normalize
>>> n = Normalize(tensor([1.0, 2.0, 3.0]), tensor([1.0, 2.0, 3.0]))
>>> t = tensor([1.0, 2.0, 3.0]).view(3, 1, 1)
>>> n(t)
/home/ygorishniy/miniconda3/envs/x/lib/python3.6/site-packages/torchvision/transforms/functional.py:206: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
  mean = torch.tensor(mean, dtype=torch.float32)
/home/ygorishniy/miniconda3/envs/x/lib/python3.6/site-packages/torchvision/transforms/functional.py:207: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
  std = torch.tensor(std, dtype=torch.float32)
Out[17]: tensor([[[0.]], [[0.]], [[0.]]])
```
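The one-line fix works because `torch.as_tensor` only copies when it has to. A minimal sketch of the difference, assuming nothing beyond the public PyTorch API:

```python
import torch

mean = torch.tensor([1.0, 2.0, 3.0])

# torch.tensor() always copy-constructs; handed an existing tensor it
# emits the "To copy construct from a tensor..." UserWarning shown above.
copied = torch.tensor(mean, dtype=torch.float32)

# torch.as_tensor() returns the input unchanged when dtype and device
# already match: no copy, no warning, same underlying storage.
shared = torch.as_tensor(mean, dtype=torch.float32)
assert shared.data_ptr() == mean.data_ptr()
```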
cowrie__cowrie-1761
[ { "content": "# Copyright (c) 2013 Bas Stottelaar <basstottelaar [AT] gmail [DOT] com>\n\nfrom __future__ import annotations\n\nimport getopt\nimport os\nimport random\nimport re\nimport time\n\nfrom twisted.internet import reactor # type: ignore\nfrom twisted.internet.defer import Deferred\n\nfrom cowrie.core.config import CowrieConfig\nfrom cowrie.shell.command import HoneyPotCommand\n\ncommands = {}\n\n\nclass Command_gcc(HoneyPotCommand):\n # Name of program. Under OSX, you might consider i686-apple-darwin11-llvm-gcc-X.X\n APP_NAME = \"gcc\"\n\n # GCC verson, used in help, version and the commandline name gcc-X.X\n APP_VERSION = (4, 7, 2)\n\n # Random binary data, which looks awesome. You could change this to whatever you want, but this\n # data will be put in the actual file and thus exposed to our hacker when he\\she cats the file.\n RANDOM_DATA = (\n b\"\\x6a\\x00\\x48\\x89\\xe5\\x48\\x83\\xe4\\xf0\\x48\\x8b\\x7d\\x08\\x48\\x8d\\x75\\x10\\x89\\xfa\"\n b\"\\x83\\xc2\\x01\\xc1\\xe2\\x03\\x48\\x01\\xf2\\x48\\x89\\xd1\\xeb\\x04\\x48\\x83\\xc1\\x08\\x48\"\n b\"\\x83\\x39\\x00\\x75\\xf6\\x48\\x83\\xc1\\x08\\xe8\\x0c\\x00\\x00\\x00\\x89\\xc7\\xe8\\xb9\\x00\"\n b\"\\x00\\x00\\xf4\\x90\\x90\\x90\\x90\\x55\\x48\\x89\\xe5\\x48\\x83\\xec\\x40\\x89\\x7d\\xfc\\x48\"\n b\"\\x89\\x75\\xf0\\x48\\x8b\\x45\\xf0\\x48\\x8b\\x00\\x48\\x83\\xf8\\x00\\x75\\x0c\\xb8\\x00\\x00\"\n b\"\\x00\\x00\\x89\\xc7\\xe8\\x8c\\x00\\x00\\x00\\x48\\x8b\\x45\\xf0\\x48\\x8b\\x40\\x08\\x30\\xc9\"\n b\"\\x48\\x89\\xc7\\x88\\xc8\\xe8\\x7e\\x00\\x00\\x00\\x89\\xc1\\x89\\x4d\\xdc\\x48\\x8d\\x0d\\xd8\"\n b\"\\x01\\x00\\x00\\x48\\x89\\xcf\\x48\\x89\\x4d\\xd0\\xe8\\x72\\x00\\x00\\x00\\x8b\\x4d\\xdc\\x30\"\n b\"\\xd2\\x48\\x8d\\x3d\\xa4\\x00\\x00\\x00\\x89\\xce\\x88\\x55\\xcf\\x48\\x89\\xc2\\x8a\\x45\\xcf\"\n b\"\\xe8\\x53\\x00\\x00\\x00\\x8b\\x45\\xdc\\x88\\x05\\xc3\\x01\\x00\\x00\\x8b\\x45\\xdc\\xc1\\xe8\"\n b\"\\x08\\x88\\x05\\xb8\\x01\\x00\\x00\\x8b\\x45\\xdc\\xc1\\xe8\\x10\\x88\\x05\\xad\\x01\\x00\\x00\"\n b\"\\x8b\\x45\\xdc\\xc1\\xe8\\x18\\x88\\x05\\xa2\\x01\\x00\\x00\\x48\\x8b\\x45\\xd0\\x48\\x89\\x45\"\n b\"\\xe0\\x48\\x8b\\x45\\xe0\\xff\\xd0\\x8b\\x45\\xec\\x48\\x83\\xc4\\x40\\x5d\\xc3\\xff\\x25\\x3e\"\n b\"\\x01\\x00\\x00\\xff\\x25\\x40\\x01\\x00\\x00\\xff\\x25\\x42\\x01\\x00\\x00\\xff\\x25\\x44\\x01\"\n b\"\\x00\\x00\\x4c\\x8d\\x1d\\x1d\\x01\\x00\\x00\\x41\\x53\\xff\\x25\\x0d\\x01\\x00\\x00\\x90\\x68\"\n b\"\\x00\\x00\\x00\\x00\\xe9\\xe6\\xff\\xff\\xff\\x68\\x0c\\x00\\x00\\x00\\xe9\\xdc\\xff\\xff\\xff\"\n b\"\\x68\\x1d\\x00\\x00\\x00\\xe9\\xd2\\xff\\xff\\xff\\x68\\x2b\\x00\\x00\\x00\\xe9\\xc8\\xff\\xff\"\n b\"\\xff\\x01\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x1c\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x0e\\x00\\x00\\x34\\x00\\x00\\x00\\x34\"\n b\"\\x00\\x00\\x00\\xf5\\x0e\\x00\\x00\\x00\\x00\\x00\\x00\\x34\\x00\\x00\\x00\\x03\\x00\\x00\\x00\"\n b\"\\x0c\\x00\\x02\\x00\\x14\\x00\\x02\\x00\\x00\\x00\\x00\\x01\\x40\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x01\\x00\\x00\\x00\"\n )\n\n scheduled: Deferred\n\n def start(self):\n \"\"\"\n Parse as much as possible from a GCC syntax and generate the output\n that is requested. 
The file that is generated can be read (and will)\n output garbage from an actual file, but when executed, it will generate\n a segmentation fault.\n\n The input files are expected to exists, but can be empty.\n\n Verified syntaxes, including non-existing files:\n * gcc test.c\n * gcc test.c -o program\n * gcc test1.c test2.c\n * gcc test1.c test2.c -o program\n * gcc test.c -o program -lm\n * gcc -g test.c -o program -lm\n * gcc test.c -DF_CPU=16000000 -I../etc -o program\n * gcc test.c -O2 -o optimized_program\n * gcc test.c -Wstrict-overflow=n -o overflowable_program\n\n Others:\n * gcc\n * gcc -h\n * gcc -v\n * gcc --help\n * gcc --version\n \"\"\"\n\n output_file = None\n input_files = 0\n complete = True\n\n # Parse options or display no files\n try:\n opts, args = getopt.gnu_getopt(\n self.args, \"ESchvgo:x:l:I:W:D:X:O:\", [\"help\", \"version\", \"param\"]\n )\n except getopt.GetoptError:\n self.no_files()\n return\n\n # Parse options\n for o, a in opts:\n if o in (\"-v\"):\n self.version(short=False)\n return\n elif o in (\"--version\"):\n self.version(short=True)\n return\n elif o in (\"-h\"):\n self.arg_missing(\"-h\")\n return\n elif o in (\"--help\"):\n self.help()\n return\n elif o in (\"-o\"):\n if len(a) == 0:\n self.arg_missing(\"-o\")\n else:\n output_file = a\n\n # Check for *.c or *.cpp files\n for value in args:\n if \".c\" in value.lower():\n sourcefile = self.fs.resolve_path(value, self.protocol.cwd)\n\n if self.fs.exists(sourcefile):\n input_files = input_files + 1\n else:\n self.write(\n f\"{Command_gcc.APP_NAME}: {value}: No such file or directory\\n\"\n )\n complete = False\n\n # To generate, or not\n if input_files > 0 and complete:\n timeout = 0.1 + random.random()\n\n # Schedule call to make it more time consuming and real\n self.scheduled = reactor.callLater( # type: ignore[attr-defined]\n timeout, self.generate_file(output_file if output_file else \"a.out\")\n )\n else:\n self.no_files()\n\n def handle_CTRL_C(self):\n \"\"\"\n Make sure the scheduled call will be canceled\n \"\"\"\n\n if getattr(self, \"scheduled\", False):\n self.scheduled.cancel()\n\n def no_files(self):\n \"\"\"\n Notify user there are no input files, and exit\n \"\"\"\n self.write(\n \"\"\"gcc: fatal error: no input files\ncompilation terminated.\\n\"\"\"\n )\n self.exit()\n\n def version(self, short):\n \"\"\"\n Print long or short version, and exit\n \"\"\"\n\n # Generate version number\n version = \".\".join([str(v) for v in Command_gcc.APP_VERSION[:3]])\n version_short = \".\".join([str(v) for v in Command_gcc.APP_VERSION[:2]])\n\n if short:\n data = \"\"\"{} (Debian {}-8) {}\nCopyright (C) 2010 Free Software Foundation, Inc.\nThis is free software; see the source for copying conditions. 
There is NO\nwarranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\"\"\".format(\n Command_gcc.APP_NAME, version, version\n )\n else:\n data = \"\"\"Using built-in specs.\nCOLLECT_GCC=gcc\nCOLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/4.7/lto-wrapper\nTarget: x86_64-linux-gnu\nConfigured with: ../src/configure -v --with-pkgversion=\\'Debian {}-5\\' --with-bugurl=file:///usr/share/doc/gcc-{}/README.Bugs --enable-languages=c,c++,fortran,objc,obj-c++ --prefix=/usr --program-suffix=-{} --enable-shared --enable-multiarch --enable-linker-build-id --with-system-zlib --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --with-gxx-include-dir=/usr/include/c++/{} --libdir=/usr/lib --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-objc-gc --with-arch-32=i586 --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu\nThread model: posix\ngcc version {} (Debian {}-5)\"\"\".format(\n version, version_short, version_short, version_short, version, version\n )\n\n # Write\n self.write(f\"{data}\\n\")\n self.exit()\n\n def generate_file(self, outfile):\n data = b\"\"\n # TODO: make sure it is written to temp file, not downloads\n tmp_fname = \"{}_{}_{}_{}\".format(\n time.strftime(\"%Y%m%d%H%M%S\"),\n self.protocol.getProtoTransport().transportId,\n self.protocol.terminal.transport.session.id,\n re.sub(\"[^A-Za-z0-9]\", \"_\", outfile),\n )\n safeoutfile = os.path.join(\n CowrieConfig.get(\"honeypot\", \"download_path\"), tmp_fname\n )\n\n # Data contains random garbage from an actual file, so when\n # catting the file, you'll see some 'real' compiled data\n for i in range(random.randint(3, 15)):\n if random.randint(1, 3) == 1:\n data = data + Command_gcc.RANDOM_DATA[::-1]\n else:\n data = data + Command_gcc.RANDOM_DATA\n\n # Write random data\n with open(safeoutfile, \"wb\") as f:\n f.write(data)\n\n # Output file\n outfile = self.fs.resolve_path(outfile, self.protocol.cwd)\n\n # Create file for the protocol\n self.fs.mkfile(outfile, 0, 0, len(data), 33188)\n self.fs.update_realfile(self.fs.getfile(outfile), safeoutfile)\n self.fs.chown(outfile, self.protocol.user.uid, self.protocol.user.gid)\n\n # Segfault command\n class segfault_command(HoneyPotCommand):\n def call(self):\n self.write(\"Segmentation fault\\n\")\n\n # Trick the 'new compiled file' as an segfault\n self.protocol.commands[outfile] = segfault_command\n\n # Done\n self.exit()\n\n def arg_missing(self, arg):\n \"\"\"\n Print missing argument message, and exit\n \"\"\"\n self.write(f\"{Command_gcc.APP_NAME}: argument to '{arg}' is missing\\n\")\n self.exit()\n\n def help(self):\n \"\"\"\n Print help info, and exit\n \"\"\"\n\n self.write(\n \"\"\"Usage: gcc [options] file...\nOptions:\n -pass-exit-codes Exit with highest error code from a phase\n --help Display this information\n --target-help Display target specific command line options\n --help={common|optimizers|params|target|warnings|[^]{joined|separate|undocumented}}[,...]\n Display specific types of command line options\n (Use '-v --help' to display command line options of sub-processes)\n --version Display compiler version information\n -dumpspecs Display all of the built in spec strings\n -dumpversion Display the version of the compiler\n -dumpmachine Display the compiler's target processor\n -print-search-dirs Display the directories in the compiler's search path\n -print-libgcc-file-name Display the name of the compiler's companion library\n 
-print-file-name=<lib> Display the full path to library <lib>\n -print-prog-name=<prog> Display the full path to compiler component <prog>\n -print-multiarch Display the target's normalized GNU triplet, used as\n a component in the library path\n -print-multi-directory Display the root directory for versions of libgcc\n -print-multi-lib Display the mapping between command line options and\n multiple library search directories\n -print-multi-os-directory Display the relative path to OS libraries\n -print-sysroot Display the target libraries directory\n -print-sysroot-headers-suffix Display the sysroot suffix used to find headers\n -Wa,<options> Pass comma-separated <options> on to the assembler\n -Wp,<options> Pass comma-separated <options> on to the preprocessor\n -Wl,<options> Pass comma-separated <options> on to the linker\n -Xassembler <arg> Pass <arg> on to the assembler\n -Xpreprocessor <arg> Pass <arg> on to the preprocessor\n -Xlinker <arg> Pass <arg> on to the linker\n -save-temps Do not delete intermediate files\n -save-temps=<arg> Do not delete intermediate files\n -no-canonical-prefixes Do not canonicalize paths when building relative\n prefixes to other gcc components\n -pipe Use pipes rather than intermediate files\n -time Time the execution of each subprocess\n -specs=<file> Override built-in specs with the contents of <file>\n -std=<standard> Assume that the input sources are for <standard>\n --sysroot=<directory> Use <directory> as the root directory for headers\n and libraries\n -B <directory> Add <directory> to the compiler's search paths\n -v Display the programs invoked by the compiler\n -### Like -v but options quoted and commands not executed\n -E Preprocess only; do not compile, assemble or link\n -S Compile only; do not assemble or link\n -c Compile and assemble, but do not link\n -o <file> Place the output into <file>\n -pie Create a position independent executable\n -shared Create a shared library\n -x <language> Specify the language of the following input files\n Permissible languages include: c c++ assembler none\n 'none' means revert to the default behavior of\n guessing the language based on the file's extension\n\nOptions starting with -g, -f, -m, -O, -W, or --param are automatically\n passed on to the various sub-processes invoked by gcc. In order to pass\n other options on to these processes the -W<letter> options must be used.\n\nFor bug reporting instructions, please see:\n<file:///usr/share/doc/gcc-4.7/README.Bugs>.\n\"\"\"\n )\n self.exit()\n\n\ncommands[\"/usr/bin/gcc\"] = Command_gcc\ncommands[\"gcc\"] = Command_gcc\ncommands[\n \"/usr/bin/gcc-%s\" % (\".\".join([str(v) for v in Command_gcc.APP_VERSION[:2]]))\n] = Command_gcc\n", "path": "src/cowrie/commands/gcc.py" } ]
[ { "content": "# Copyright (c) 2013 Bas Stottelaar <basstottelaar [AT] gmail [DOT] com>\n\nfrom __future__ import annotations\n\nimport getopt\nimport os\nimport random\nimport re\nimport time\n\nfrom twisted.internet import reactor # type: ignore\nfrom twisted.internet.defer import Deferred\n\nfrom cowrie.core.config import CowrieConfig\nfrom cowrie.shell.command import HoneyPotCommand\n\ncommands = {}\n\n\nclass Command_gcc(HoneyPotCommand):\n # Name of program. Under OSX, you might consider i686-apple-darwin11-llvm-gcc-X.X\n APP_NAME = \"gcc\"\n\n # GCC verson, used in help, version and the commandline name gcc-X.X\n APP_VERSION = (4, 7, 2)\n\n # Random binary data, which looks awesome. You could change this to whatever you want, but this\n # data will be put in the actual file and thus exposed to our hacker when he\\she cats the file.\n RANDOM_DATA = (\n b\"\\x6a\\x00\\x48\\x89\\xe5\\x48\\x83\\xe4\\xf0\\x48\\x8b\\x7d\\x08\\x48\\x8d\\x75\\x10\\x89\\xfa\"\n b\"\\x83\\xc2\\x01\\xc1\\xe2\\x03\\x48\\x01\\xf2\\x48\\x89\\xd1\\xeb\\x04\\x48\\x83\\xc1\\x08\\x48\"\n b\"\\x83\\x39\\x00\\x75\\xf6\\x48\\x83\\xc1\\x08\\xe8\\x0c\\x00\\x00\\x00\\x89\\xc7\\xe8\\xb9\\x00\"\n b\"\\x00\\x00\\xf4\\x90\\x90\\x90\\x90\\x55\\x48\\x89\\xe5\\x48\\x83\\xec\\x40\\x89\\x7d\\xfc\\x48\"\n b\"\\x89\\x75\\xf0\\x48\\x8b\\x45\\xf0\\x48\\x8b\\x00\\x48\\x83\\xf8\\x00\\x75\\x0c\\xb8\\x00\\x00\"\n b\"\\x00\\x00\\x89\\xc7\\xe8\\x8c\\x00\\x00\\x00\\x48\\x8b\\x45\\xf0\\x48\\x8b\\x40\\x08\\x30\\xc9\"\n b\"\\x48\\x89\\xc7\\x88\\xc8\\xe8\\x7e\\x00\\x00\\x00\\x89\\xc1\\x89\\x4d\\xdc\\x48\\x8d\\x0d\\xd8\"\n b\"\\x01\\x00\\x00\\x48\\x89\\xcf\\x48\\x89\\x4d\\xd0\\xe8\\x72\\x00\\x00\\x00\\x8b\\x4d\\xdc\\x30\"\n b\"\\xd2\\x48\\x8d\\x3d\\xa4\\x00\\x00\\x00\\x89\\xce\\x88\\x55\\xcf\\x48\\x89\\xc2\\x8a\\x45\\xcf\"\n b\"\\xe8\\x53\\x00\\x00\\x00\\x8b\\x45\\xdc\\x88\\x05\\xc3\\x01\\x00\\x00\\x8b\\x45\\xdc\\xc1\\xe8\"\n b\"\\x08\\x88\\x05\\xb8\\x01\\x00\\x00\\x8b\\x45\\xdc\\xc1\\xe8\\x10\\x88\\x05\\xad\\x01\\x00\\x00\"\n b\"\\x8b\\x45\\xdc\\xc1\\xe8\\x18\\x88\\x05\\xa2\\x01\\x00\\x00\\x48\\x8b\\x45\\xd0\\x48\\x89\\x45\"\n b\"\\xe0\\x48\\x8b\\x45\\xe0\\xff\\xd0\\x8b\\x45\\xec\\x48\\x83\\xc4\\x40\\x5d\\xc3\\xff\\x25\\x3e\"\n b\"\\x01\\x00\\x00\\xff\\x25\\x40\\x01\\x00\\x00\\xff\\x25\\x42\\x01\\x00\\x00\\xff\\x25\\x44\\x01\"\n b\"\\x00\\x00\\x4c\\x8d\\x1d\\x1d\\x01\\x00\\x00\\x41\\x53\\xff\\x25\\x0d\\x01\\x00\\x00\\x90\\x68\"\n b\"\\x00\\x00\\x00\\x00\\xe9\\xe6\\xff\\xff\\xff\\x68\\x0c\\x00\\x00\\x00\\xe9\\xdc\\xff\\xff\\xff\"\n b\"\\x68\\x1d\\x00\\x00\\x00\\xe9\\xd2\\xff\\xff\\xff\\x68\\x2b\\x00\\x00\\x00\\xe9\\xc8\\xff\\xff\"\n b\"\\xff\\x01\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x1c\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x0e\\x00\\x00\\x34\\x00\\x00\\x00\\x34\"\n b\"\\x00\\x00\\x00\\xf5\\x0e\\x00\\x00\\x00\\x00\\x00\\x00\\x34\\x00\\x00\\x00\\x03\\x00\\x00\\x00\"\n b\"\\x0c\\x00\\x02\\x00\\x14\\x00\\x02\\x00\\x00\\x00\\x00\\x01\\x40\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x01\\x00\\x00\\x00\"\n )\n\n scheduled: Deferred\n\n def start(self):\n \"\"\"\n Parse as much as possible from a GCC syntax and generate the output\n that is requested. 
The file that is generated can be read (and will)\n output garbage from an actual file, but when executed, it will generate\n a segmentation fault.\n\n The input files are expected to exists, but can be empty.\n\n Verified syntaxes, including non-existing files:\n * gcc test.c\n * gcc test.c -o program\n * gcc test1.c test2.c\n * gcc test1.c test2.c -o program\n * gcc test.c -o program -lm\n * gcc -g test.c -o program -lm\n * gcc test.c -DF_CPU=16000000 -I../etc -o program\n * gcc test.c -O2 -o optimized_program\n * gcc test.c -Wstrict-overflow=n -o overflowable_program\n\n Others:\n * gcc\n * gcc -h\n * gcc -v\n * gcc --help\n * gcc --version\n \"\"\"\n\n output_file = None\n input_files = 0\n complete = True\n\n # Parse options or display no files\n try:\n opts, args = getopt.gnu_getopt(\n self.args, \"ESchvgo:x:l:I:W:D:X:O:\", [\"help\", \"version\", \"param\"]\n )\n except getopt.GetoptError:\n self.no_files()\n return\n\n # Parse options\n for o, a in opts:\n if o in (\"-v\"):\n self.version(short=False)\n return\n elif o in (\"--version\"):\n self.version(short=True)\n return\n elif o in (\"-h\"):\n self.arg_missing(\"-h\")\n return\n elif o in (\"--help\"):\n self.help()\n return\n elif o in (\"-o\"):\n if len(a) == 0:\n self.arg_missing(\"-o\")\n else:\n output_file = a\n\n # Check for *.c or *.cpp files\n for value in args:\n if \".c\" in value.lower():\n sourcefile = self.fs.resolve_path(value, self.protocol.cwd)\n\n if self.fs.exists(sourcefile):\n input_files = input_files + 1\n else:\n self.write(\n f\"{Command_gcc.APP_NAME}: {value}: No such file or directory\\n\"\n )\n complete = False\n\n # To generate, or not\n if input_files > 0 and complete:\n timeout = 0.1 + random.random()\n\n # Schedule call to make it more time consuming and real\n self.scheduled = reactor.callLater( # type: ignore[attr-defined]\n timeout, self.generate_file, (output_file if output_file else \"a.out\")\n )\n else:\n self.no_files()\n\n def handle_CTRL_C(self):\n \"\"\"\n Make sure the scheduled call will be canceled\n \"\"\"\n\n if getattr(self, \"scheduled\", False):\n self.scheduled.cancel()\n\n def no_files(self):\n \"\"\"\n Notify user there are no input files, and exit\n \"\"\"\n self.write(\n \"\"\"gcc: fatal error: no input files\ncompilation terminated.\\n\"\"\"\n )\n self.exit()\n\n def version(self, short):\n \"\"\"\n Print long or short version, and exit\n \"\"\"\n\n # Generate version number\n version = \".\".join([str(v) for v in Command_gcc.APP_VERSION[:3]])\n version_short = \".\".join([str(v) for v in Command_gcc.APP_VERSION[:2]])\n\n if short:\n data = \"\"\"{} (Debian {}-8) {}\nCopyright (C) 2010 Free Software Foundation, Inc.\nThis is free software; see the source for copying conditions. 
There is NO\nwarranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\"\"\".format(\n Command_gcc.APP_NAME, version, version\n )\n else:\n data = \"\"\"Using built-in specs.\nCOLLECT_GCC=gcc\nCOLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/4.7/lto-wrapper\nTarget: x86_64-linux-gnu\nConfigured with: ../src/configure -v --with-pkgversion=\\'Debian {}-5\\' --with-bugurl=file:///usr/share/doc/gcc-{}/README.Bugs --enable-languages=c,c++,fortran,objc,obj-c++ --prefix=/usr --program-suffix=-{} --enable-shared --enable-multiarch --enable-linker-build-id --with-system-zlib --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --with-gxx-include-dir=/usr/include/c++/{} --libdir=/usr/lib --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-objc-gc --with-arch-32=i586 --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu\nThread model: posix\ngcc version {} (Debian {}-5)\"\"\".format(\n version, version_short, version_short, version_short, version, version\n )\n\n # Write\n self.write(f\"{data}\\n\")\n self.exit()\n\n def generate_file(self, outfile):\n data = b\"\"\n # TODO: make sure it is written to temp file, not downloads\n tmp_fname = \"{}_{}_{}_{}\".format(\n time.strftime(\"%Y%m%d%H%M%S\"),\n self.protocol.getProtoTransport().transportId,\n self.protocol.terminal.transport.session.id,\n re.sub(\"[^A-Za-z0-9]\", \"_\", outfile),\n )\n safeoutfile = os.path.join(\n CowrieConfig.get(\"honeypot\", \"download_path\"), tmp_fname\n )\n\n # Data contains random garbage from an actual file, so when\n # catting the file, you'll see some 'real' compiled data\n for i in range(random.randint(3, 15)):\n if random.randint(1, 3) == 1:\n data = data + Command_gcc.RANDOM_DATA[::-1]\n else:\n data = data + Command_gcc.RANDOM_DATA\n\n # Write random data\n with open(safeoutfile, \"wb\") as f:\n f.write(data)\n\n # Output file\n outfile = self.fs.resolve_path(outfile, self.protocol.cwd)\n\n # Create file for the protocol\n self.fs.mkfile(outfile, 0, 0, len(data), 33188)\n self.fs.update_realfile(self.fs.getfile(outfile), safeoutfile)\n self.fs.chown(outfile, self.protocol.user.uid, self.protocol.user.gid)\n\n # Segfault command\n class segfault_command(HoneyPotCommand):\n def call(self):\n self.write(\"Segmentation fault\\n\")\n\n # Trick the 'new compiled file' as an segfault\n self.protocol.commands[outfile] = segfault_command\n\n # Done\n self.exit()\n\n def arg_missing(self, arg):\n \"\"\"\n Print missing argument message, and exit\n \"\"\"\n self.write(f\"{Command_gcc.APP_NAME}: argument to '{arg}' is missing\\n\")\n self.exit()\n\n def help(self):\n \"\"\"\n Print help info, and exit\n \"\"\"\n\n self.write(\n \"\"\"Usage: gcc [options] file...\nOptions:\n -pass-exit-codes Exit with highest error code from a phase\n --help Display this information\n --target-help Display target specific command line options\n --help={common|optimizers|params|target|warnings|[^]{joined|separate|undocumented}}[,...]\n Display specific types of command line options\n (Use '-v --help' to display command line options of sub-processes)\n --version Display compiler version information\n -dumpspecs Display all of the built in spec strings\n -dumpversion Display the version of the compiler\n -dumpmachine Display the compiler's target processor\n -print-search-dirs Display the directories in the compiler's search path\n -print-libgcc-file-name Display the name of the compiler's companion library\n 
-print-file-name=<lib> Display the full path to library <lib>\n -print-prog-name=<prog> Display the full path to compiler component <prog>\n -print-multiarch Display the target's normalized GNU triplet, used as\n a component in the library path\n -print-multi-directory Display the root directory for versions of libgcc\n -print-multi-lib Display the mapping between command line options and\n multiple library search directories\n -print-multi-os-directory Display the relative path to OS libraries\n -print-sysroot Display the target libraries directory\n -print-sysroot-headers-suffix Display the sysroot suffix used to find headers\n -Wa,<options> Pass comma-separated <options> on to the assembler\n -Wp,<options> Pass comma-separated <options> on to the preprocessor\n -Wl,<options> Pass comma-separated <options> on to the linker\n -Xassembler <arg> Pass <arg> on to the assembler\n -Xpreprocessor <arg> Pass <arg> on to the preprocessor\n -Xlinker <arg> Pass <arg> on to the linker\n -save-temps Do not delete intermediate files\n -save-temps=<arg> Do not delete intermediate files\n -no-canonical-prefixes Do not canonicalize paths when building relative\n prefixes to other gcc components\n -pipe Use pipes rather than intermediate files\n -time Time the execution of each subprocess\n -specs=<file> Override built-in specs with the contents of <file>\n -std=<standard> Assume that the input sources are for <standard>\n --sysroot=<directory> Use <directory> as the root directory for headers\n and libraries\n -B <directory> Add <directory> to the compiler's search paths\n -v Display the programs invoked by the compiler\n -### Like -v but options quoted and commands not executed\n -E Preprocess only; do not compile, assemble or link\n -S Compile only; do not assemble or link\n -c Compile and assemble, but do not link\n -o <file> Place the output into <file>\n -pie Create a position independent executable\n -shared Create a shared library\n -x <language> Specify the language of the following input files\n Permissible languages include: c c++ assembler none\n 'none' means revert to the default behavior of\n guessing the language based on the file's extension\n\nOptions starting with -g, -f, -m, -O, -W, or --param are automatically\n passed on to the various sub-processes invoked by gcc. In order to pass\n other options on to these processes the -W<letter> options must be used.\n\nFor bug reporting instructions, please see:\n<file:///usr/share/doc/gcc-4.7/README.Bugs>.\n\"\"\"\n )\n self.exit()\n\n\ncommands[\"/usr/bin/gcc\"] = Command_gcc\ncommands[\"gcc\"] = Command_gcc\ncommands[\n \"/usr/bin/gcc-%s\" % (\".\".join([str(v) for v in Command_gcc.APP_VERSION[:2]]))\n] = Command_gcc\n", "path": "src/cowrie/commands/gcc.py" } ]
diff --git a/src/cowrie/commands/gcc.py b/src/cowrie/commands/gcc.py
index 2e62fcb0ce..45e9db9e99 100644
--- a/src/cowrie/commands/gcc.py
+++ b/src/cowrie/commands/gcc.py
@@ -133,7 +133,7 @@ def start(self):
 
             # Schedule call to make it more time consuming and real
             self.scheduled = reactor.callLater(  # type: ignore[attr-defined]
-                timeout, self.generate_file(output_file if output_file else "a.out")
+                timeout, self.generate_file, (output_file if output_file else "a.out")
             )
         else:
             self.no_files()
Gcc "compile" file quits cowrie **Describe the bug** By running gcc to "compile" a file just quits cowrie **To Reproduce** Steps to reproduce the behavior: 1. Login on the honeypot 2. Compile the file with `gcc file.c` 4. See error and cowrie will quit(`Connection to localhost closed by remote host.`) **Expected behavior** To compile the file **Server (please complete the following information):** - OS: Ubuntu 22.04 - Python: Python 3.10 **Additional context** ``` 2022-08-25T15:35:01.948821Z [HoneyPotSSHTransport,7728,127.0.0.1] CMD: gcc hi.c -o p 2022-08-25T15:35:01.950607Z [HoneyPotSSHTransport,7728,127.0.0.1] Command found: gcc hi.c -o p 2022-08-25T15:35:01.952849Z [HoneyPotSSHTransport,7728,127.0.0.1] Unhandled Error Traceback (most recent call last): File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/log.py", line 96, in callWithLogger return callWithContext({"system": lp}, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/log.py", line 80, in callWithContext return context.call({ILogContext: newCtx}, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/context.py", line 117, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/context.py", line 82, in callWithContext return func(*args, **kw) --- <exception caught here> --- File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/posixbase.py", line 683, in _doReadOrWrite why = selectable.doRead() File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/tcp.py", line 248, in doRead return self._dataReceived(data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/tcp.py", line 253, in _dataReceived rval = self.protocol.dataReceived(data) File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 144, in dataReceived self.dispatchMessage(messageNum, packet[1:]) File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 148, in dispatchMessage transport.SSHServerTransport.dispatchMessage(self, message_num, payload) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/transport.py", line 790, in dispatchMessage self.service.packetReceived(messageNum, payload) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/service.py", line 50, in packetReceived return f(packet) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/connection.py", line 265, in ssh_CHANNEL_DATA channel.dataReceived(data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/session.py", line 173, in dataReceived self.client.transport.write(data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/session.py", line 233, in write self.proto.dataReceived(data) File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 126, in dataReceived insults.ServerProtocol.dataReceived(self, data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/insults/insults.py", line 520, in dataReceived self.terminalProtocol.keystrokeReceived(ch, None) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/recvline.py", line 435, in keystrokeReceived m() File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 329, in handle_RETURN return 
recvline.RecvLine.handle_RETURN(self) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/recvline.py", line 493, in handle_RETURN self.lineReceived(line) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 190, in lineReceived self.cmdstack[-1].lineReceived(line) File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 114, in lineReceived self.runCommand() File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 329, in runCommand self.protocol.call_command(pp, cmdclass, *cmd_array[0]["rargs"]) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 310, in call_command HoneyPotBaseProtocol.call_command(self, pp, cmd, *args) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 199, in call_command obj.start() File "/home/cowrie/cowrie/src/cowrie/commands/gcc.py", line 135, in start self.scheduled = reactor.callLater( # type: ignore[attr-defined] File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/base.py", line 868, in callLater assert builtins.callable(callable), f"{callable} is not callable" builtins.AssertionError: None is not callable ```
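The crash reduces to Twisted's `callLater` contract: it wants a callable plus its arguments, while the old code called `generate_file(...)` on the spot and handed `callLater` the returned `None`; that is the `AssertionError` at the bottom of the traceback. A minimal sketch of the two forms (the delays are illustrative):

```python
from twisted.internet import reactor

def generate_file(outfile):
    print(f"pretending to compile {outfile}")

# Broken: generate_file("a.out") runs immediately and evaluates to None,
# so callLater's `assert builtins.callable(callable)` fires.
# reactor.callLater(1.0, generate_file("a.out"))

# Fixed: pass the function and its argument separately; the reactor
# invokes generate_file("a.out") once the delay expires. Note the
# parentheses in the patch are only grouping; without a trailing comma
# they do not create a tuple.
reactor.callLater(1.0, generate_file, "a.out")
reactor.callLater(1.5, reactor.stop)
reactor.run()
```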
facebookresearch__hydra-1887
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom pathlib import Path\n\nfrom read_version import read_version\nfrom setuptools import find_namespace_packages, setup\n\nsetup(\n name=\"hydra-ray-launcher\",\n version=read_version(\"hydra_plugins/hydra_ray_launcher\", \"__init__.py\"),\n author=\"Jieru Hu\",\n author_email=\"[email protected]\",\n description=\"Hydra Ray launcher plugin\",\n long_description=(Path(__file__).parent / \"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n # \"Programming Language :: Python :: 3.9\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX :: Linux\",\n ],\n install_requires=[\n \"boto3==1.17.17\",\n \"hydra-core>=1.1.0.dev7\",\n \"ray[default]==1.6.0\",\n # https://github.com/aio-libs/aiohttp/issues/6203\n \"aiohttp!=3.8.0\",\n \"cloudpickle==1.6.0\",\n \"pickle5==0.0.11\",\n ],\n include_package_data=True,\n)\n", "path": "plugins/hydra_ray_launcher/setup.py" } ]
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom pathlib import Path\n\nfrom read_version import read_version\nfrom setuptools import find_namespace_packages, setup\n\nsetup(\n name=\"hydra-ray-launcher\",\n version=read_version(\"hydra_plugins/hydra_ray_launcher\", \"__init__.py\"),\n author=\"Jieru Hu\",\n author_email=\"[email protected]\",\n description=\"Hydra Ray launcher plugin\",\n long_description=(Path(__file__).parent / \"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX :: Linux\",\n ],\n install_requires=[\n \"boto3==1.17.17\",\n \"hydra-core>=1.1.0.dev7\",\n \"ray[default]==1.6.0\",\n # https://github.com/aio-libs/aiohttp/issues/6203\n \"aiohttp!=3.8.0\",\n \"cloudpickle==1.6.0\",\n \"pickle5==0.0.11\",\n ],\n include_package_data=True,\n)\n", "path": "plugins/hydra_ray_launcher/setup.py" } ]
diff --git a/plugins/hydra_ray_launcher/integration_test_tools/README.md b/plugins/hydra_ray_launcher/integration_test_tools/README.md new file mode 100644 index 00000000000..eb3a4a12b4a --- /dev/null +++ b/plugins/hydra_ray_launcher/integration_test_tools/README.md @@ -0,0 +1,21 @@ +## Setting up a new testing AMI for ray launcher. + +To run the tool: + +- Make sure the dependencies in `setup_integration_test_ami.py` matches exactly ray launcher's `setup.py`. + +- Before running the tool, set up your aws profile with admin access to the Hydra test AWS account. +``` +AWS_PROFILE=jieru python create_integration_test_ami.py +``` +You will see a new AMI created in the output +```commandline +ec2.Image(id='ami-0d65d5647e065a180') current state pending +... +``` +Sometimes it could take hours for a new AMI to be created. Proceed to the next step once the +AMI becomes available. + +- Update the `AWS_RAY_AMI` env variable in `tests/test_ray_aws_launcher.py` +- Run the test locally and debug if needed. +- Create a PR and make sure all CI pass! diff --git a/plugins/hydra_ray_launcher/integration_test_tools/create_integration_test_ami.py b/plugins/hydra_ray_launcher/integration_test_tools/create_integration_test_ami.py index e7d670d8747..78277eab753 100644 --- a/plugins/hydra_ray_launcher/integration_test_tools/create_integration_test_ami.py +++ b/plugins/hydra_ray_launcher/integration_test_tools/create_integration_test_ami.py @@ -29,7 +29,7 @@ def _run_command(command: str) -> str: return output [email protected](config_name="create_integration_test_ami_config") [email protected](config_path=".", config_name="create_integration_test_ami_config") def set_up_machine(cfg: DictConfig) -> None: security_group_id = cfg.security_group_id assert security_group_id != "", "Security group cannot be empty!" 
diff --git a/plugins/hydra_ray_launcher/integration_test_tools/create_integration_test_ami_config.yaml b/plugins/hydra_ray_launcher/integration_test_tools/create_integration_test_ami_config.yaml index 47085e7c2be..bea49b03b15 100644 --- a/plugins/hydra_ray_launcher/integration_test_tools/create_integration_test_ami_config.yaml +++ b/plugins/hydra_ray_launcher/integration_test_tools/create_integration_test_ami_config.yaml @@ -2,6 +2,7 @@ security_group_id: sg-095ac7c26aa0d33bb python_versions: - 3.7 - 3.8 + - 3.9 ray_yaml: cluster_name: ray_test_base_AMI min_workers: 0 diff --git a/plugins/hydra_ray_launcher/integration_test_tools/setup_integration_test_ami.py b/plugins/hydra_ray_launcher/integration_test_tools/setup_integration_test_ami.py index 317296992a7..72d955a5b26 100644 --- a/plugins/hydra_ray_launcher/integration_test_tools/setup_integration_test_ami.py +++ b/plugins/hydra_ray_launcher/integration_test_tools/setup_integration_test_ami.py @@ -9,6 +9,7 @@ "ray[default]==1.6.0", "cloudpickle==1.6.0", "pickle5==0.0.11", + "aiohttp!=3.8", ] diff --git a/plugins/hydra_ray_launcher/news/1205.feature b/plugins/hydra_ray_launcher/news/1205.feature new file mode 100644 index 00000000000..20440f60a4f --- /dev/null +++ b/plugins/hydra_ray_launcher/news/1205.feature @@ -0,0 +1 @@ +Add support for python 3.9 diff --git a/plugins/hydra_ray_launcher/setup.py b/plugins/hydra_ray_launcher/setup.py index b0a71f58e97..4a64a977977 100644 --- a/plugins/hydra_ray_launcher/setup.py +++ b/plugins/hydra_ray_launcher/setup.py @@ -19,7 +19,7 @@ "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", - # "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.9", "Operating System :: MacOS", "Operating System :: POSIX :: Linux", ], diff --git a/plugins/hydra_ray_launcher/tests/test_ray_aws_launcher.py b/plugins/hydra_ray_launcher/tests/test_ray_aws_launcher.py index e1c89def69a..2b9a2f557ca 100644 --- a/plugins/hydra_ray_launcher/tests/test_ray_aws_launcher.py +++ b/plugins/hydra_ray_launcher/tests/test_ray_aws_launcher.py @@ -19,6 +19,7 @@ LauncherTestSuite, ) from hydra.test_utils.test_utils import chdir_hydra_root, chdir_plugin_root +from omegaconf import OmegaConf from pytest import fixture, mark from hydra_plugins.hydra_ray_launcher.ray_aws_launcher import ( # type: ignore @@ -54,7 +55,7 @@ aws_not_configured = True -ami = os.environ.get("AWS_RAY_AMI", "ami-0d03f5ce1006a7ed5") +ami = os.environ.get("AWS_RAY_AMI", "ami-0436072b623a028fa") security_group_id = os.environ.get("AWS_RAY_SECURITY_GROUP", "sg-0a12b09a5ff961aee") subnet_id = os.environ.get("AWS_RAY_SUBNET", "subnet-acd2cfe7") instance_role = os.environ.get( @@ -116,32 +117,33 @@ chdir_plugin_root() +def run_command(commands: str) -> str: + log.info(f"running: {commands}") + output = subprocess.getoutput(commands) + log.info(f"outputs: {output}") + return output + + def build_ray_launcher_wheel(tmp_wheel_dir: str) -> str: chdir_hydra_root() plugin = "hydra_ray_launcher" os.chdir(Path("plugins") / plugin) log.info(f"Build wheel for {plugin}, save wheel to {tmp_wheel_dir}.") - subprocess.getoutput( - f"python setup.py sdist bdist_wheel && cp dist/*.whl {tmp_wheel_dir}" - ) + run_command(f"python setup.py sdist bdist_wheel && cp dist/*.whl {tmp_wheel_dir}") log.info("Download all plugin dependency wheels.") - subprocess.getoutput(f"pip download . 
-d {tmp_wheel_dir}") - plugin_wheel = subprocess.getoutput("ls dist/*.whl").split("/")[-1] + run_command(f"pip download . -d {tmp_wheel_dir}") + plugin_wheel = run_command("ls dist/*.whl").split("/")[-1] chdir_hydra_root() return plugin_wheel def build_core_wheel(tmp_wheel_dir: str) -> str: chdir_hydra_root() - subprocess.getoutput( - f"python setup.py sdist bdist_wheel && cp dist/*.whl {tmp_wheel_dir}" - ) + run_command(f"python setup.py sdist bdist_wheel && cp dist/*.whl {tmp_wheel_dir}") # download dependency wheel for hydra-core - subprocess.getoutput( - f"pip download -r requirements/requirements.txt -d {tmp_wheel_dir}" - ) - wheel = subprocess.getoutput("ls dist/*.whl").split("/")[-1] + run_command(f"pip download -r requirements/requirements.txt -d {tmp_wheel_dir}") + wheel = run_command("ls dist/*.whl").split("/")[-1] return wheel @@ -161,12 +163,16 @@ def upload_and_install_wheels( sdk.run_on_cluster( connect_config, cmd=f"pip install --no-index --find-links={temp_remote_wheel_dir} {temp_remote_wheel_dir}{core_wheel}", + with_output=True, + ) + log.info(f"Install plugin wheel {plugin_wheel} ") + log.info( + f"pip install --no-index --find-links={temp_remote_wheel_dir} {temp_remote_wheel_dir}{plugin_wheel}" ) - - log.info(f"Install plugin wheel {plugin_wheel}") sdk.run_on_cluster( connect_config, cmd=f"pip install --no-index --find-links={temp_remote_wheel_dir} {temp_remote_wheel_dir}{plugin_wheel}", + with_output=True, ) @@ -214,8 +220,8 @@ def manage_cluster() -> Generator[None, None, None]: # build all the wheels tmpdir = tempfile.mkdtemp() - plugin_wheel = build_ray_launcher_wheel(tmpdir) core_wheel = build_core_wheel(tmpdir) + plugin_wheel = build_ray_launcher_wheel(tmpdir) connect_config = { "cluster_name": cluster_name, "provider": { @@ -233,14 +239,28 @@ def manage_cluster() -> Generator[None, None, None]: "head_node": ray_nodes_conf, "worker_nodes": ray_nodes_conf, } + + # save connect_config as yaml, this could be useful for debugging + # you can run `ray attach <connect_config>.yaml` and log on to the AWS cluster for debugging. + conf = OmegaConf.create(connect_config) + with tempfile.NamedTemporaryFile(suffix=".yaml", delete=False) as fp: + OmegaConf.save(config=conf, f=fp.name, resolve=True) + log.info(f"Saving config to {fp.name}") + sdk.create_or_update_cluster( connect_config, ) sdk.run_on_cluster( - connect_config, run_env="auto", cmd=f"mkdir -p {temp_remote_dir}" + connect_config, + run_env="auto", + cmd=f"mkdir -p {temp_remote_dir}", + with_output=True, ) sdk.run_on_cluster( - connect_config, run_env="auto", cmd=f"mkdir -p {temp_remote_wheel_dir}" + connect_config, + run_env="auto", + cmd=f"mkdir -p {temp_remote_wheel_dir}", + with_output=True, ) upload_and_install_wheels(tmpdir, connect_config, core_wheel, plugin_wheel) validate_lib_version(connect_config)
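Beyond the version bump, the diff above also pins down where Hydra looks for the config: `config_path="."` is resolved relative to the decorated script, so the yaml must sit next to it. A minimal sketch of the pattern; the function body here is illustrative, not the repo's code:

```python
import hydra
from omegaconf import DictConfig

# config_path="." means "look for create_integration_test_ami_config.yaml
# in the same directory as this script".
@hydra.main(config_path=".", config_name="create_integration_test_ami_config")
def set_up_machine(cfg: DictConfig) -> None:
    print(cfg.security_group_id)

if __name__ == "__main__":
    set_up_machine()
```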
[Ray-Plugin] Add support for Python 3.9

Python 3.9 support depends on https://github.com/ray-project/ray/issues/12788
Related to #1062
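For context on why the fix is a one-line classifier change: trove classifiers are PyPI metadata, not an installation constraint. A sketch, where the package name is illustrative and `python_requires` is shown only as an assumption, since the plugin's own `setup.py` does not set it:

```python
from setuptools import setup

setup(
    name="example-plugin",  # illustrative name
    classifiers=[
        # Advertises support on PyPI; nothing enforces it at install time.
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    # The field pip actually enforces, included here as an assumption.
    python_requires=">=3.7",
)
```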
mitmproxy__mitmproxy-6127
[ { "content": "\"\"\"Take incoming HTTP requests and replay them with modified parameters.\"\"\"\nfrom mitmproxy import ctx\n\n\ndef request(flow):\n # Avoid an infinite loop by not replaying already replayed requests\n if flow.is_replay == \"request\":\n return\n flow = flow.copy()\n # Only interactive tools have a view. If we have one, add a duplicate entry\n # for our flow.\n if \"view\" in ctx.master.addons:\n ctx.master.commands.call(\"view.flows.add\", [flow])\n flow.request.path = \"/changed\"\n ctx.master.commands.call(\"replay.client\", [flow])\n", "path": "examples/addons/duplicate-modify-replay.py" } ]
[ { "content": "\"\"\"Take incoming HTTP requests and replay them with modified parameters.\"\"\"\nfrom mitmproxy import ctx\n\n\ndef request(flow):\n # Avoid an infinite loop by not replaying already replayed requests\n if flow.is_replay == \"request\":\n return\n flow = flow.copy()\n # Only interactive tools have a view. If we have one, add a duplicate entry\n # for our flow.\n if \"view\" in ctx.master.addons:\n ctx.master.commands.call(\"view.flows.duplicate\", [flow])\n flow.request.path = \"/changed\"\n ctx.master.commands.call(\"replay.client\", [flow])\n", "path": "examples/addons/duplicate-modify-replay.py" } ]
diff --git a/examples/addons/duplicate-modify-replay.py b/examples/addons/duplicate-modify-replay.py
index 7138e5b6f3..f11eb7c582 100644
--- a/examples/addons/duplicate-modify-replay.py
+++ b/examples/addons/duplicate-modify-replay.py
@@ -10,6 +10,6 @@ def request(flow):
     # Only interactive tools have a view. If we have one, add a duplicate entry
     # for our flow.
     if "view" in ctx.master.addons:
-        ctx.master.commands.call("view.flows.add", [flow])
+        ctx.master.commands.call("view.flows.duplicate", [flow])
     flow.request.path = "/changed"
     ctx.master.commands.call("replay.client", [flow])
`view.flows.add` command does not exist but the examples reference it

#### Problem Description
The `view.flows.add` command does not exist but the example `duplicate-modify-replay.py` shows this command being used. `replay.client` seems to perform both the "add to view" and "replay" function.
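The corrected example is worth annotating, since it packs the whole addon pattern into a few calls: `ctx.master.commands.call` dispatches a registered command by name, and both commands used here take a *list* of flows. The comments below are added for explanation; the code itself is the fixed example:

```python
"""Take incoming HTTP requests and replay them with modified parameters."""
from mitmproxy import ctx

def request(flow):
    # Replayed requests trigger this hook again; bail out or loop forever.
    if flow.is_replay == "request":
        return
    # Work on a copy so the original flow is left untouched.
    flow = flow.copy()
    # mitmdump has no view addon, so only duplicate into the view when
    # one exists.
    if "view" in ctx.master.addons:
        ctx.master.commands.call("view.flows.duplicate", [flow])
    flow.request.path = "/changed"
    ctx.master.commands.call("replay.client", [flow])
```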
pyodide__pyodide-55
[ { "content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = os.path.abspath(os.path.dirname(__file__))\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n if 'md5' not in pkg['source']:\n return\n checksum = pkg['source']['md5']\n CHUNK_SIZE = 1 << 16\n h = hashlib.md5()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid checksum\")\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = os.path.join(\n buildpath, os.path.basename(pkg['source']['url']))\n if not os.path.isfile(tarballpath):\n subprocess.run([\n 'wget', '-q', '-O', tarballpath, pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = os.path.join(buildpath, packagedir)\n if not os.path.isdir(srcpath):\n shutil.unpack_archive(tarballpath, buildpath)\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if os.path.isfile(os.path.join(srcpath, '.patched')):\n return\n\n # Apply all of the patches\n orig_dir = os.getcwd()\n pkgdir = os.path.abspath(os.path.dirname(path))\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', os.path.join(pkgdir, patch)\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(os.path.join(pkgdir, src), os.path.join(srcpath, dst))\n\n with open(os.path.join(srcpath, '.patched'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef get_libdir(srcpath, args):\n # Get the name of the build/lib.XXX directory that distutils wrote its\n # output to\n slug = subprocess.check_output([\n os.path.join(args.host, 'bin', 'python3'),\n '-c',\n 'import sysconfig, sys; '\n 'print(\"{}-{}.{}\".format('\n 'sysconfig.get_platform(), '\n 'sys.version_info[0], '\n 'sys.version_info[1]))']).decode('ascii').strip()\n purelib = os.path.join(srcpath, 'build', 'lib')\n if os.path.isdir(purelib):\n libdir = purelib\n else:\n libdir = os.path.join(srcpath, 'build', 'lib.' 
+ slug)\n return libdir\n\n\ndef compile(path, srcpath, pkg, args):\n if os.path.isfile(os.path.join(srcpath, '.built')):\n return\n\n orig_dir = os.getcwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n os.path.join(args.host, 'bin', 'python3'),\n os.path.join(ROOTDIR, 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n libdir = get_libdir(srcpath, args)\n pkgdir = os.path.abspath(os.path.dirname(path))\n env = {\n 'BUILD': libdir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(os.path.join(srcpath, '.built'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if os.path.isfile(os.path.join(buildpath, '.packaged')):\n return\n\n name = pkg['package']['name']\n libdir = get_libdir(srcpath, args)\n subprocess.run([\n 'python2',\n os.path.join(os.environ['EMSCRIPTEN'], 'tools', 'file_packager.py'),\n os.path.join(buildpath, name + '.data'),\n '--preload',\n '{}@/lib/python3.6/site-packages'.format(libdir),\n '--js-output={}'.format(os.path.join(buildpath, name + '.js')),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__'], check=True)\n subprocess.run([\n 'uglifyjs',\n os.path.join(buildpath, name + '.js'),\n '-o',\n os.path.join(buildpath, name + '.js')], check=True)\n\n with open(os.path.join(buildpath, '.packaged'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = os.path.dirname(path)\n orig_path = os.getcwd()\n os.chdir(dirpath)\n try:\n buildpath = os.path.join(dirpath, 'build')\n if not os.path.exists(buildpath):\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = os.path.abspath(args.package[0])\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py" } ]
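The `get_libdir` helper above leans on how distutils names its output directory. A quick way to see the slug it reconstructs; the example output assumes a linux/x86-64 host running Python 3.6:

```python
import sys
import sysconfig

# distutils writes compiled output to build/lib.<platform>-<major>.<minor>;
# pure-Python builds land in plain build/lib instead.
slug = "{}-{}.{}".format(
    sysconfig.get_platform(), sys.version_info[0], sys.version_info[1])
print("build/lib." + slug)  # e.g. build/lib.linux-x86_64-3.6
```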
[ { "content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = os.path.abspath(os.path.dirname(__file__))\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n if 'md5' not in pkg['source']:\n return\n checksum = pkg['source']['md5']\n CHUNK_SIZE = 1 << 16\n h = hashlib.md5()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid checksum\")\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = os.path.join(\n buildpath, os.path.basename(pkg['source']['url']))\n if not os.path.isfile(tarballpath):\n subprocess.run([\n 'wget', '-q', '-O', tarballpath, pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = os.path.join(buildpath, packagedir)\n if not os.path.isdir(srcpath):\n shutil.unpack_archive(tarballpath, buildpath)\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if os.path.isfile(os.path.join(srcpath, '.patched')):\n return\n\n # Apply all of the patches\n orig_dir = os.getcwd()\n pkgdir = os.path.abspath(os.path.dirname(path))\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', os.path.join(pkgdir, patch)\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(os.path.join(pkgdir, src), os.path.join(srcpath, dst))\n\n with open(os.path.join(srcpath, '.patched'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef get_libdir(srcpath, args):\n # Get the name of the build/lib.XXX directory that distutils wrote its\n # output to\n slug = subprocess.check_output([\n os.path.join(args.host, 'bin', 'python3'),\n '-c',\n 'import sysconfig, sys; '\n 'print(\"{}-{}.{}\".format('\n 'sysconfig.get_platform(), '\n 'sys.version_info[0], '\n 'sys.version_info[1]))']).decode('ascii').strip()\n purelib = os.path.join(srcpath, 'build', 'lib')\n if os.path.isdir(purelib):\n libdir = purelib\n else:\n libdir = os.path.join(srcpath, 'build', 'lib.' 
+ slug)\n return libdir\n\n\ndef compile(path, srcpath, pkg, args):\n if os.path.isfile(os.path.join(srcpath, '.built')):\n return\n\n orig_dir = os.getcwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n os.path.join(args.host, 'bin', 'python3'),\n os.path.join(ROOTDIR, 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n libdir = get_libdir(srcpath, args)\n pkgdir = os.path.abspath(os.path.dirname(path))\n env = {\n 'BUILD': libdir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(os.path.join(srcpath, '.built'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if os.path.isfile(os.path.join(buildpath, '.packaged')):\n return\n\n name = pkg['package']['name']\n libdir = get_libdir(srcpath, args)\n subprocess.run([\n 'python2',\n os.path.join(os.environ['EMSCRIPTEN'], 'tools', 'file_packager.py'),\n os.path.join(buildpath, name + '.data'),\n '--preload',\n '{}@/lib/python3.6/site-packages'.format(libdir),\n '--js-output={}'.format(os.path.join(buildpath, name + '.js')),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__',\n '--use-preload-plugins'], check=True)\n subprocess.run([\n 'uglifyjs',\n os.path.join(buildpath, name + '.js'),\n '-o',\n os.path.join(buildpath, name + '.js')], check=True)\n\n with open(os.path.join(buildpath, '.packaged'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = os.path.dirname(path)\n orig_path = os.getcwd()\n os.chdir(dirpath)\n try:\n buildpath = os.path.join(dirpath, 'build')\n if not os.path.exists(buildpath):\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = os.path.abspath(args.package[0])\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py" } ]
diff --git a/Makefile b/Makefile index d879478edad..11f33f893f9 100644 --- a/Makefile +++ b/Makefile @@ -62,7 +62,7 @@ build/pyodide.asm.js: src/main.bc src/jsimport.bc src/jsproxy.bc src/js2python.b build/pyodide.asm.data: root/.built - python2 $(FILEPACKAGER) build/pyodide.asm.data --preload root/lib@lib --js-output=build/pyodide.asm.data.js + python2 $(FILEPACKAGER) build/pyodide.asm.data --preload root/lib@lib --js-output=build/pyodide.asm.data.js --use-preload-plugins uglifyjs build/pyodide.asm.data.js -o build/pyodide.asm.data.js diff --git a/emsdk/patches/async-module-loading.patch b/emsdk/patches/async-module-loading.patch new file mode 100644 index 00000000000..7ed625be2b5 --- /dev/null +++ b/emsdk/patches/async-module-loading.patch @@ -0,0 +1,253 @@ +diff --git a/src/library.js b/src/library.js +index 5fc87ab16..b8ead8fc0 100644 +--- a/src/library.js ++++ b/src/library.js +@@ -1755,39 +1755,44 @@ LibraryManager.library = { + return handle; + } + ++ var lib_module; + if (filename === '__self__') { + var handle = -1; +- var lib_module = Module; ++ lib_module = Module; + } else { +- var target = FS.findObject(filename); +- if (!target || target.isFolder || target.isDevice) { +- DLFCN.errorMsg = 'Could not find dynamic lib: ' + filename; +- return 0; +- } +- FS.forceLoadFile(target); ++ if (Module['preloadedWasm'] !== undefined && ++ Module['preloadedWasm'][filename] !== undefined) { ++ lib_module = Module['preloadedWasm'][filename]; ++ } else { ++ var target = FS.findObject(filename); ++ if (!target || target.isFolder || target.isDevice) { ++ DLFCN.errorMsg = 'Could not find dynamic lib: ' + filename; ++ return 0; ++ } ++ FS.forceLoadFile(target); + +- var lib_module; +- try { ++ try { + #if WASM +- // the shared library is a shared wasm library (see tools/shared.py WebAssembly.make_shared_library) +- var lib_data = FS.readFile(filename, { encoding: 'binary' }); +- if (!(lib_data instanceof Uint8Array)) lib_data = new Uint8Array(lib_data); +- //Module.printErr('libfile ' + filename + ' size: ' + lib_data.length); +- lib_module = loadWebAssemblyModule(lib_data); ++ // the shared library is a shared wasm library (see tools/shared.py WebAssembly.make_shared_library) ++ var lib_data = FS.readFile(filename, { encoding: 'binary' }); ++ if (!(lib_data instanceof Uint8Array)) lib_data = new Uint8Array(lib_data); ++ //Module.printErr('libfile ' + filename + ' size: ' + lib_data.length); ++ lib_module = loadWebAssemblyModule(lib_data); + #else +- // the shared library is a JS file, which we eval +- var lib_data = FS.readFile(filename, { encoding: 'utf8' }); +- lib_module = eval(lib_data)( +- alignFunctionTables(), +- Module +- ); ++ // the shared library is a JS file, which we eval ++ var lib_data = FS.readFile(filename, { encoding: 'utf8' }); ++ lib_module = eval(lib_data)( ++ alignFunctionTables(), ++ Module ++ ); + #endif +- } catch (e) { ++ } catch (e) { + #if ASSERTIONS +- Module.printErr('Error in loading dynamic library: ' + e); ++ Module.printErr('Error in loading dynamic library: ' + e); + #endif +- DLFCN.errorMsg = 'Could not evaluate dynamic lib: ' + filename + '\n' + e; +- return 0; ++ DLFCN.errorMsg = 'Could not evaluate dynamic lib: ' + filename + '\n' + e; ++ return 0; ++ } + } + + // Not all browsers support Object.keys(). 
+diff --git a/src/library_browser.js b/src/library_browser.js +index 36738391e..4258835ea 100644 +--- a/src/library_browser.js ++++ b/src/library_browser.js +@@ -225,6 +225,33 @@ var LibraryBrowser = { + }; + Module['preloadPlugins'].push(audioPlugin); + ++#if (WASM != 0) && (MAIN_MODULE != 0) ++ var wasmPlugin = {}; ++ wasmPlugin['asyncWasmLoadPromise'] = new Promise( ++ function(resolve, reject) { return resolve(); }); ++ wasmPlugin['canHandle'] = function(name) { ++ return !Module.noWasmDecoding && (name.endsWith('.so') || name.endsWith('.wasm')); ++ }; ++ wasmPlugin['handle'] = function(byteArray, name, onload, onerror) { ++ // loadWebAssemblyModule can not load modules out-of-order, so rather ++ // than just running the promises in parallel, this makes a chain of ++ // promises to run in series. ++ this.asyncWasmLoadPromise = this.asyncWasmLoadPromise.then( ++ function() { ++ return Module.loadWebAssemblyModule(byteArray, true) ++ }).then( ++ function(module) { ++ Module.preloadedWasm[name] = module; ++ onload(); ++ }, ++ function(err) { ++ console.warn("Couldn't instantiate wasm: " + name + " '" + err + "'"); ++ onerror(); ++ }); ++ }; ++ Module['preloadPlugins'].push(wasmPlugin); ++#endif ++ + // Canvas event setup + + function pointerLockChange() { +diff --git a/src/preamble.js b/src/preamble.js +index a757e8300..f529fe148 100644 +--- a/src/preamble.js ++++ b/src/preamble.js +@@ -1822,6 +1822,9 @@ function removeRunDependency(id) { + + Module["preloadedImages"] = {}; // maps url to image data + Module["preloadedAudios"] = {}; // maps url to audio data ++#if (WASM != 0) && (MAIN_MODULE != 0) ++Module["preloadedWasm"] = {}; // maps url to wasm instance exports ++#endif + + #if PGO + var PGOMonitor = { +diff --git a/src/support.js b/src/support.js +index f6c9842ff..99367db70 100644 +--- a/src/support.js ++++ b/src/support.js +@@ -86,7 +86,7 @@ function loadDynamicLibrary(lib) { + + #if WASM + // Loads a side module from binary data +-function loadWebAssemblyModule(binary) { ++function loadWebAssemblyModule(binary, loadAsync) { + var int32View = new Uint32Array(new Uint8Array(binary.subarray(0, 24)).buffer); + assert(int32View[0] == 0x6d736100, 'need to see wasm magic number'); // \0wasm + // we should see the dylink section right after the magic number and wasm version +@@ -166,59 +166,71 @@ function loadWebAssemblyModule(binary) { + oldTable.push(table.get(i)); + } + #endif +- // create a module from the instance +- var instance = new WebAssembly.Instance(new WebAssembly.Module(binary), info); ++ ++ function postInstantiation(instance) { ++ var exports = {}; + #if ASSERTIONS +- // the table should be unchanged +- assert(table === originalTable); +- assert(table === Module['wasmTable']); +- if (instance.exports['table']) { +- assert(table === instance.exports['table']); +- } +- // the old part of the table should be unchanged +- for (var i = 0; i < oldTableSize; i++) { +- assert(table.get(i) === oldTable[i], 'old table entries must remain the same'); +- } +- // verify that the new table region was filled in +- for (var i = 0; i < tableSize; i++) { +- assert(table.get(oldTableSize + i) !== undefined, 'table entry was not filled in'); +- } +-#endif +- var exports = {}; +- for (var e in instance.exports) { +- var value = instance.exports[e]; +- if (typeof value === 'object') { +- // a breaking change in the wasm spec, globals are now objects +- // https://github.com/WebAssembly/mutable-global/issues/1 +- value = value.value; ++ // the table should be unchanged ++ assert(table === 
originalTable); ++ assert(table === Module['wasmTable']); ++ if (instance.exports['table']) { ++ assert(table === instance.exports['table']); ++ } ++ // the old part of the table should be unchanged ++ for (var i = 0; i < oldTableSize; i++) { ++ assert(table.get(i) === oldTable[i], 'old table entries must remain the same'); ++ } ++ // verify that the new table region was filled in ++ for (var i = 0; i < tableSize; i++) { ++ assert(table.get(oldTableSize + i) !== undefined, 'table entry was not filled in'); + } +- if (typeof value === 'number') { +- // relocate it - modules export the absolute value, they can't relocate before they export ++#endif ++ for (var e in instance.exports) { ++ var value = instance.exports[e]; ++ if (typeof value === 'object') { ++ // a breaking change in the wasm spec, globals are now objects ++ // https://github.com/WebAssembly/mutable-global/issues/1 ++ value = value.value; ++ } ++ if (typeof value === 'number') { ++ // relocate it - modules export the absolute value, they can't relocate before they export + #if EMULATED_FUNCTION_POINTERS +- // it may be a function pointer +- if (e.substr(0, 3) == 'fp$' && typeof instance.exports[e.substr(3)] === 'function') { +- value = value + env['tableBase']; +- } else { ++ // it may be a function pointer ++ if (e.substr(0, 3) == 'fp$' && typeof instance.exports[e.substr(3)] === 'function') { ++ value = value + env['tableBase']; ++ } else { + #endif +- value = value + env['memoryBase']; ++ value = value + env['memoryBase']; + #if EMULATED_FUNCTION_POINTERS +- } ++ } + #endif ++ } ++ exports[e] = value; + } +- exports[e] = value; +- } +- // initialize the module +- var init = exports['__post_instantiate']; +- if (init) { +- if (runtimeInitialized) { +- init(); +- } else { +- // we aren't ready to run compiled code yet +- __ATINIT__.push(init); ++ // initialize the module ++ var init = exports['__post_instantiate']; ++ if (init) { ++ if (runtimeInitialized) { ++ init(); ++ } else { ++ // we aren't ready to run compiled code yet ++ __ATINIT__.push(init); ++ } + } ++ return exports; ++ } ++ ++ if (loadAsync) { ++ return WebAssembly.instantiate(binary, info).then(function(result) { ++ return postInstantiation(result.instance); ++ }); ++ } else { ++ var instance = new WebAssembly.Instance(new WebAssembly.Module(binary), info); ++ return postInstantiation(instance); + } +- return exports; + } ++Module['loadWebAssemblyModule'] = loadWebAssemblyModule; ++ + #endif // WASM + #endif // RELOCATABLE + diff --git a/src/pyodide.js b/src/pyodide.js index 52fc51fe0bf..d104e461940 100644 --- a/src/pyodide.js +++ b/src/pyodide.js @@ -71,22 +71,44 @@ var languagePluginLoader = new Promise((resolve, reject) => { let Module = {}; window.Module = Module; + Module.noImageDecoding = true; + Module.noAudioDecoding = true; + let isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1; + if (isFirefox) { + Module.noWasmDecoding = true; + } + let wasm_promise = WebAssembly.compileStreaming(fetch(wasmURL)); Module.instantiateWasm = (info, receiveInstance) => { wasm_promise.then(module => WebAssembly.instantiate(module, info)) .then(instance => receiveInstance(instance)); return {}; }; + Module.filePackagePrefixURL = baseURL; - Module.postRun = () => { - delete window.Module; - fetch(`${baseURL}packages.json`) + var postRunPromise = new Promise((resolve, reject) => { + Module.postRun = () => { + delete window.Module; + fetch(`${baseURL}packages.json`) .then((response) => response.json()) .then((json) => { window.pyodide.packages = json; 
resolve(); }); - }; + }; + }); + + var dataLoadPromise = new Promise((resolve, reject) => { + Module.monitorRunDependencies = + (n) => { + if (n === 0) { + delete Module.monitorRunDependencies; + resolve(); + } + } + }); + + Promise.all([ postRunPromise, dataLoadPromise ]).then(() => resolve()); let data_script = document.createElement('script'); data_script.src = `${baseURL}pyodide.asm.data.js`; diff --git a/tools/buildpkg.py b/tools/buildpkg.py index b774086b450..0c5cb0d8dee 100755 --- a/tools/buildpkg.py +++ b/tools/buildpkg.py @@ -144,7 +144,8 @@ def package_files(buildpath, srcpath, pkg, args): '--js-output={}'.format(os.path.join(buildpath, name + '.js')), '--export-name=pyodide', '--exclude', '*.wasm.pre', - '--exclude', '__pycache__'], check=True) + '--exclude', '__pycache__', + '--use-preload-plugins'], check=True) subprocess.run([ 'uglifyjs', os.path.join(buildpath, name + '.js'),
Make work on Chrome
pallets__click-1587
[ { "content": "import inspect\nimport io\nimport itertools\nimport os\nimport struct\nimport sys\n\nfrom ._compat import DEFAULT_COLUMNS\nfrom ._compat import get_winterm_size\nfrom ._compat import isatty\nfrom ._compat import strip_ansi\nfrom ._compat import WIN\nfrom .exceptions import Abort\nfrom .exceptions import UsageError\nfrom .globals import resolve_color_default\nfrom .types import Choice\nfrom .types import convert_type\nfrom .types import Path\nfrom .utils import echo\nfrom .utils import LazyFile\n\n# The prompt functions to use. The doc tools currently override these\n# functions to customize how they work.\nvisible_prompt_func = input\n\n_ansi_colors = {\n \"black\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n \"reset\": 39,\n \"bright_black\": 90,\n \"bright_red\": 91,\n \"bright_green\": 92,\n \"bright_yellow\": 93,\n \"bright_blue\": 94,\n \"bright_magenta\": 95,\n \"bright_cyan\": 96,\n \"bright_white\": 97,\n}\n_ansi_reset_all = \"\\033[0m\"\n\n\ndef hidden_prompt_func(prompt):\n import getpass\n\n return getpass.getpass(prompt)\n\n\ndef _build_prompt(\n text, suffix, show_default=False, default=None, show_choices=True, type=None\n):\n prompt = text\n if type is not None and show_choices and isinstance(type, Choice):\n prompt += f\" ({', '.join(map(str, type.choices))})\"\n if default is not None and show_default:\n prompt = f\"{prompt} [{_format_default(default)}]\"\n return f\"{prompt}{suffix}\"\n\n\ndef _format_default(default):\n if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, \"name\"):\n return default.name\n\n return default\n\n\ndef prompt(\n text,\n default=None,\n hide_input=False,\n confirmation_prompt=False,\n type=None,\n value_proc=None,\n prompt_suffix=\": \",\n show_default=True,\n err=False,\n show_choices=True,\n):\n \"\"\"Prompts a user for input. This is a convenience function that can\n be used to prompt a user for input later.\n\n If the user aborts the input by sending a interrupt signal, this\n function will catch it and raise a :exc:`Abort` exception.\n\n .. versionadded:: 7.0\n Added the show_choices parameter.\n\n .. versionadded:: 6.0\n Added unicode support for cmd.exe on Windows.\n\n .. versionadded:: 4.0\n Added the `err` parameter.\n\n :param text: the text to show for the prompt.\n :param default: the default value to use if no input happens. 
If this\n is not given it will prompt until it's aborted.\n :param hide_input: if this is set to true then the input value will\n be hidden.\n :param confirmation_prompt: asks for confirmation for the value.\n :param type: the type to use to check the value against.\n :param value_proc: if this parameter is provided it's a function that\n is invoked instead of the type conversion to\n convert a value.\n :param prompt_suffix: a suffix that should be added to the prompt.\n :param show_default: shows or hides the default value in the prompt.\n :param err: if set to true the file defaults to ``stderr`` instead of\n ``stdout``, the same as with echo.\n :param show_choices: Show or hide choices if the passed type is a Choice.\n For example if type is a Choice of either day or week,\n show_choices is true and text is \"Group by\" then the\n prompt will be \"Group by (day, week): \".\n \"\"\"\n result = None\n\n def prompt_func(text):\n f = hidden_prompt_func if hide_input else visible_prompt_func\n try:\n # Write the prompt separately so that we get nice\n # coloring through colorama on Windows\n echo(text, nl=False, err=err)\n return f(\"\")\n except (KeyboardInterrupt, EOFError):\n # getpass doesn't print a newline if the user aborts input with ^C.\n # Allegedly this behavior is inherited from getpass(3).\n # A doc bug has been filed at https://bugs.python.org/issue24711\n if hide_input:\n echo(None, err=err)\n raise Abort()\n\n if value_proc is None:\n value_proc = convert_type(type, default)\n\n prompt = _build_prompt(\n text, prompt_suffix, show_default, default, show_choices, type\n )\n\n while 1:\n while 1:\n value = prompt_func(prompt)\n if value:\n break\n elif default is not None:\n if isinstance(value_proc, Path):\n # validate Path default value(exists, dir_okay etc.)\n value = default\n break\n return default\n try:\n result = value_proc(value)\n except UsageError as e:\n echo(f\"Error: {e.message}\", err=err) # noqa: B306\n continue\n if not confirmation_prompt:\n return result\n while 1:\n value2 = prompt_func(\"Repeat for confirmation: \")\n if value2:\n break\n if value == value2:\n return result\n echo(\"Error: the two entered values do not match\", err=err)\n\n\ndef confirm(\n text, default=False, abort=False, prompt_suffix=\": \", show_default=True, err=False\n):\n \"\"\"Prompts for confirmation (yes/no question).\n\n If the user aborts the input by sending a interrupt signal this\n function will catch it and raise a :exc:`Abort` exception.\n\n .. 
versionadded:: 4.0\n Added the `err` parameter.\n\n :param text: the question to ask.\n :param default: the default for the prompt.\n :param abort: if this is set to `True` a negative answer aborts the\n exception by raising :exc:`Abort`.\n :param prompt_suffix: a suffix that should be added to the prompt.\n :param show_default: shows or hides the default value in the prompt.\n :param err: if set to true the file defaults to ``stderr`` instead of\n ``stdout``, the same as with echo.\n \"\"\"\n prompt = _build_prompt(\n text, prompt_suffix, show_default, \"Y/n\" if default else \"y/N\"\n )\n while 1:\n try:\n # Write the prompt separately so that we get nice\n # coloring through colorama on Windows\n echo(prompt, nl=False, err=err)\n value = visible_prompt_func(\"\").lower().strip()\n except (KeyboardInterrupt, EOFError):\n raise Abort()\n if value in (\"y\", \"yes\"):\n rv = True\n elif value in (\"n\", \"no\"):\n rv = False\n elif value == \"\":\n rv = default\n else:\n echo(\"Error: invalid input\", err=err)\n continue\n break\n if abort and not rv:\n raise Abort()\n return rv\n\n\ndef get_terminal_size():\n \"\"\"Returns the current size of the terminal as tuple in the form\n ``(width, height)`` in columns and rows.\n \"\"\"\n import shutil\n\n if hasattr(shutil, \"get_terminal_size\"):\n return shutil.get_terminal_size()\n\n # We provide a sensible default for get_winterm_size() when being invoked\n # inside a subprocess. Without this, it would not provide a useful input.\n if get_winterm_size is not None:\n size = get_winterm_size()\n if size == (0, 0):\n return (79, 24)\n else:\n return size\n\n def ioctl_gwinsz(fd):\n try:\n import fcntl\n import termios\n\n cr = struct.unpack(\"hh\", fcntl.ioctl(fd, termios.TIOCGWINSZ, \"1234\"))\n except Exception:\n return\n return cr\n\n cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)\n if not cr:\n try:\n fd = os.open(os.ctermid(), os.O_RDONLY)\n try:\n cr = ioctl_gwinsz(fd)\n finally:\n os.close(fd)\n except Exception:\n pass\n if not cr or not cr[0] or not cr[1]:\n cr = (os.environ.get(\"LINES\", 25), os.environ.get(\"COLUMNS\", DEFAULT_COLUMNS))\n return int(cr[1]), int(cr[0])\n\n\ndef echo_via_pager(text_or_generator, color=None):\n \"\"\"This function takes a text and shows it via an environment specific\n pager on stdout.\n\n .. versionchanged:: 3.0\n Added the `color` flag.\n\n :param text_or_generator: the text to page, or alternatively, a\n generator emitting the text to page.\n :param color: controls if the pager supports ANSI colors or not. The\n default is autodetection.\n \"\"\"\n color = resolve_color_default(color)\n\n if inspect.isgeneratorfunction(text_or_generator):\n i = text_or_generator()\n elif isinstance(text_or_generator, str):\n i = [text_or_generator]\n else:\n i = iter(text_or_generator)\n\n # convert every element of i to a text type if necessary\n text_generator = (el if isinstance(el, str) else str(el) for el in i)\n\n from ._termui_impl import pager\n\n return pager(itertools.chain(text_generator, \"\\n\"), color)\n\n\ndef progressbar(\n iterable=None,\n length=None,\n label=None,\n show_eta=True,\n show_percent=None,\n show_pos=False,\n item_show_func=None,\n fill_char=\"#\",\n empty_char=\"-\",\n bar_template=\"%(label)s [%(bar)s] %(info)s\",\n info_sep=\" \",\n width=36,\n file=None,\n color=None,\n):\n \"\"\"This function creates an iterable context manager that can be used\n to iterate over something while showing a progress bar. 
It will\n either iterate over the `iterable` or `length` items (that are counted\n up). While iteration happens, this function will print a rendered\n progress bar to the given `file` (defaults to stdout) and will attempt\n to calculate remaining time and more. By default, this progress bar\n will not be rendered if the file is not a terminal.\n\n The context manager creates the progress bar. When the context\n manager is entered the progress bar is already created. With every\n iteration over the progress bar, the iterable passed to the bar is\n advanced and the bar is updated. When the context manager exits,\n a newline is printed and the progress bar is finalized on screen.\n\n Note: The progress bar is currently designed for use cases where the\n total progress can be expected to take at least several seconds.\n Because of this, the ProgressBar class object won't display\n progress that is considered too fast, and progress where the time\n between steps is less than a second.\n\n No printing must happen or the progress bar will be unintentionally\n destroyed.\n\n Example usage::\n\n with progressbar(items) as bar:\n for item in bar:\n do_something_with(item)\n\n Alternatively, if no iterable is specified, one can manually update the\n progress bar through the `update()` method instead of directly\n iterating over the progress bar. The update method accepts the number\n of steps to increment the bar with::\n\n with progressbar(length=chunks.total_bytes) as bar:\n for chunk in chunks:\n process_chunk(chunk)\n bar.update(chunks.bytes)\n\n The ``update()`` method also takes an optional value specifying the\n ``current_item`` at the new position. This is useful when used\n together with ``item_show_func`` to customize the output for each\n manual step::\n\n with click.progressbar(\n length=total_size,\n label='Unzipping archive',\n item_show_func=lambda a: a.filename\n ) as bar:\n for archive in zip_file:\n archive.extract()\n bar.update(archive.size, archive)\n\n .. versionadded:: 2.0\n\n .. versionadded:: 4.0\n Added the `color` parameter. Added a `update` method to the\n progressbar object.\n\n :param iterable: an iterable to iterate over. If not provided the length\n is required.\n :param length: the number of items to iterate over. By default the\n progressbar will attempt to ask the iterator about its\n length, which might or might not work. If an iterable is\n also provided this parameter can be used to override the\n length. If an iterable is not provided the progress bar\n will iterate over a range of that length.\n :param label: the label to show next to the progress bar.\n :param show_eta: enables or disables the estimated time display. This is\n automatically disabled if the length cannot be\n determined.\n :param show_percent: enables or disables the percentage display. The\n default is `True` if the iterable has a length or\n `False` if not.\n :param show_pos: enables or disables the absolute position display. The\n default is `False`.\n :param item_show_func: a function called with the current item which\n can return a string to show the current item\n next to the progress bar. 
Note that the current\n item can be `None`!\n :param fill_char: the character to use to show the filled part of the\n progress bar.\n :param empty_char: the character to use to show the non-filled part of\n the progress bar.\n :param bar_template: the format string to use as template for the bar.\n The parameters in it are ``label`` for the label,\n ``bar`` for the progress bar and ``info`` for the\n info section.\n :param info_sep: the separator between multiple info items (eta etc.)\n :param width: the width of the progress bar in characters, 0 means full\n terminal width\n :param file: the file to write to. If this is not a terminal then\n only the label is printed.\n :param color: controls if the terminal supports ANSI colors or not. The\n default is autodetection. This is only needed if ANSI\n codes are included anywhere in the progress bar output\n which is not the case by default.\n \"\"\"\n from ._termui_impl import ProgressBar\n\n color = resolve_color_default(color)\n return ProgressBar(\n iterable=iterable,\n length=length,\n show_eta=show_eta,\n show_percent=show_percent,\n show_pos=show_pos,\n item_show_func=item_show_func,\n fill_char=fill_char,\n empty_char=empty_char,\n bar_template=bar_template,\n info_sep=info_sep,\n file=file,\n label=label,\n width=width,\n color=color,\n )\n\n\ndef clear():\n \"\"\"Clears the terminal screen. This will have the effect of clearing\n the whole visible space of the terminal and moving the cursor to the\n top left. This does not do anything if not connected to a terminal.\n\n .. versionadded:: 2.0\n \"\"\"\n if not isatty(sys.stdout):\n return\n # If we're on Windows and we don't have colorama available, then we\n # clear the screen by shelling out. Otherwise we can use an escape\n # sequence.\n if WIN:\n os.system(\"cls\")\n else:\n sys.stdout.write(\"\\033[2J\\033[1;1H\")\n\n\ndef style(\n text,\n fg=None,\n bg=None,\n bold=None,\n dim=None,\n underline=None,\n blink=None,\n reverse=None,\n reset=True,\n):\n \"\"\"Styles a text with ANSI styles and returns the new string. By\n default the styling is self contained which means that at the end\n of the string a reset code is issued. This can be prevented by\n passing ``reset=False``.\n\n Examples::\n\n click.echo(click.style('Hello World!', fg='green'))\n click.echo(click.style('ATTENTION!', blink=True))\n click.echo(click.style('Some things', reverse=True, fg='cyan'))\n\n Supported color names:\n\n * ``black`` (might be a gray)\n * ``red``\n * ``green``\n * ``yellow`` (might be an orange)\n * ``blue``\n * ``magenta``\n * ``cyan``\n * ``white`` (might be light gray)\n * ``bright_black``\n * ``bright_red``\n * ``bright_green``\n * ``bright_yellow``\n * ``bright_blue``\n * ``bright_magenta``\n * ``bright_cyan``\n * ``bright_white``\n * ``reset`` (reset the color code only)\n\n .. versionadded:: 2.0\n\n .. versionadded:: 7.0\n Added support for bright colors.\n\n :param text: the string to style with ansi codes.\n :param fg: if provided this will become the foreground color.\n :param bg: if provided this will become the background color.\n :param bold: if provided this will enable or disable bold mode.\n :param dim: if provided this will enable or disable dim mode. 
This is\n badly supported.\n :param underline: if provided this will enable or disable underline.\n :param blink: if provided this will enable or disable blinking.\n :param reverse: if provided this will enable or disable inverse\n rendering (foreground becomes background and the\n other way round).\n :param reset: by default a reset-all code is added at the end of the\n string which means that styles do not carry over. This\n can be disabled to compose styles.\n \"\"\"\n bits = []\n if fg:\n try:\n bits.append(f\"\\033[{_ansi_colors[fg]}m\")\n except KeyError:\n raise TypeError(f\"Unknown color {fg!r}\")\n if bg:\n try:\n bits.append(f\"\\033[{_ansi_colors[bg] + 10}m\")\n except KeyError:\n raise TypeError(f\"Unknown color {bg!r}\")\n if bold is not None:\n bits.append(f\"\\033[{1 if bold else 22}m\")\n if dim is not None:\n bits.append(f\"\\033[{2 if dim else 22}m\")\n if underline is not None:\n bits.append(f\"\\033[{4 if underline else 24}m\")\n if blink is not None:\n bits.append(f\"\\033[{5 if blink else 25}m\")\n if reverse is not None:\n bits.append(f\"\\033[{7 if reverse else 27}m\")\n bits.append(text)\n if reset:\n bits.append(_ansi_reset_all)\n return \"\".join(bits)\n\n\ndef unstyle(text):\n \"\"\"Removes ANSI styling information from a string. Usually it's not\n necessary to use this function as Click's echo function will\n automatically remove styling if necessary.\n\n .. versionadded:: 2.0\n\n :param text: the text to remove style information from.\n \"\"\"\n return strip_ansi(text)\n\n\ndef secho(message=None, file=None, nl=True, err=False, color=None, **styles):\n \"\"\"This function combines :func:`echo` and :func:`style` into one\n call. As such the following two calls are the same::\n\n click.secho('Hello World!', fg='green')\n click.echo(click.style('Hello World!', fg='green'))\n\n All keyword arguments are forwarded to the underlying functions\n depending on which one they go with.\n\n .. versionadded:: 2.0\n \"\"\"\n if message is not None:\n message = style(message, **styles)\n return echo(message, file=file, nl=nl, err=err, color=color)\n\n\ndef edit(\n text=None, editor=None, env=None, require_save=True, extension=\".txt\", filename=None\n):\n r\"\"\"Edits the given text in the defined editor. If an editor is given\n (should be the full path to the executable but the regular operating\n system search path is used for finding the executable) it overrides\n the detected editor. Optionally, some environment variables can be\n used. If the editor is closed without changes, `None` is returned. In\n case a file is edited directly the return value is always `None` and\n `require_save` and `extension` are ignored.\n\n If the editor cannot be opened a :exc:`UsageError` is raised.\n\n Note for Windows: to simplify cross-platform usage, the newlines are\n automatically converted from POSIX to Windows and vice versa. As such,\n the message here will have ``\\n`` as newline markers.\n\n :param text: the text to edit.\n :param editor: optionally the editor to use. Defaults to automatic\n detection.\n :param env: environment variables to forward to the editor.\n :param require_save: if this is true, then not saving in the editor\n will make the return value become `None`.\n :param extension: the extension to tell the editor about. This defaults\n to `.txt` but changing this might change syntax\n highlighting.\n :param filename: if provided it will edit this file instead of the\n provided text contents. 
It will not use a temporary\n file as an indirection in that case.\n \"\"\"\n from ._termui_impl import Editor\n\n editor = Editor(\n editor=editor, env=env, require_save=require_save, extension=extension\n )\n if filename is None:\n return editor.edit(text)\n editor.edit_file(filename)\n\n\ndef launch(url, wait=False, locate=False):\n \"\"\"This function launches the given URL (or filename) in the default\n viewer application for this file type. If this is an executable, it\n might launch the executable in a new session. The return value is\n the exit code of the launched application. Usually, ``0`` indicates\n success.\n\n Examples::\n\n click.launch('https://click.palletsprojects.com/')\n click.launch('/my/downloaded/file', locate=True)\n\n .. versionadded:: 2.0\n\n :param url: URL or filename of the thing to launch.\n :param wait: waits for the program to stop.\n :param locate: if this is set to `True` then instead of launching the\n application associated with the URL it will attempt to\n launch a file manager with the file located. This\n might have weird effects if the URL does not point to\n the filesystem.\n \"\"\"\n from ._termui_impl import open_url\n\n return open_url(url, wait=wait, locate=locate)\n\n\n# If this is provided, getchar() calls into this instead. This is used\n# for unittesting purposes.\n_getchar = None\n\n\ndef getchar(echo=False):\n \"\"\"Fetches a single character from the terminal and returns it. This\n will always return a unicode character and under certain rare\n circumstances this might return more than one character. The\n situations which more than one character is returned is when for\n whatever reason multiple characters end up in the terminal buffer or\n standard input was not actually a terminal.\n\n Note that this will always read from the terminal, even if something\n is piped into the standard input.\n\n Note for Windows: in rare cases when typing non-ASCII characters, this\n function might wait for a second character and then return both at once.\n This is because certain Unicode characters look like special-key markers.\n\n .. versionadded:: 2.0\n\n :param echo: if set to `True`, the character read will also show up on\n the terminal. The default is to not show it.\n \"\"\"\n f = _getchar\n if f is None:\n from ._termui_impl import getchar as f\n return f(echo)\n\n\ndef raw_terminal():\n from ._termui_impl import raw_terminal as f\n\n return f()\n\n\ndef pause(info=\"Press any key to continue ...\", err=False):\n \"\"\"This command stops execution and waits for the user to press any\n key to continue. This is similar to the Windows batch \"pause\"\n command. If the program is not run through a terminal, this command\n will instead do nothing.\n\n .. versionadded:: 2.0\n\n .. versionadded:: 4.0\n Added the `err` parameter.\n\n :param info: the info string to print before pausing.\n :param err: if set to message goes to ``stderr`` instead of\n ``stdout``, the same as with echo.\n \"\"\"\n if not isatty(sys.stdin) or not isatty(sys.stdout):\n return\n try:\n if info:\n echo(info, nl=False, err=err)\n try:\n getchar()\n except (KeyboardInterrupt, EOFError):\n pass\n finally:\n if info:\n echo(err=err)\n", "path": "src/click/termui.py" } ]
[ { "content": "import inspect\nimport io\nimport itertools\nimport os\nimport struct\nimport sys\n\nfrom ._compat import DEFAULT_COLUMNS\nfrom ._compat import get_winterm_size\nfrom ._compat import isatty\nfrom ._compat import strip_ansi\nfrom ._compat import WIN\nfrom .exceptions import Abort\nfrom .exceptions import UsageError\nfrom .globals import resolve_color_default\nfrom .types import Choice\nfrom .types import convert_type\nfrom .types import Path\nfrom .utils import echo\nfrom .utils import LazyFile\n\n# The prompt functions to use. The doc tools currently override these\n# functions to customize how they work.\nvisible_prompt_func = input\n\n_ansi_colors = {\n \"black\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n \"reset\": 39,\n \"bright_black\": 90,\n \"bright_red\": 91,\n \"bright_green\": 92,\n \"bright_yellow\": 93,\n \"bright_blue\": 94,\n \"bright_magenta\": 95,\n \"bright_cyan\": 96,\n \"bright_white\": 97,\n}\n_ansi_reset_all = \"\\033[0m\"\n\n\ndef hidden_prompt_func(prompt):\n import getpass\n\n return getpass.getpass(prompt)\n\n\ndef _build_prompt(\n text, suffix, show_default=False, default=None, show_choices=True, type=None\n):\n prompt = text\n if type is not None and show_choices and isinstance(type, Choice):\n prompt += f\" ({', '.join(map(str, type.choices))})\"\n if default is not None and show_default:\n prompt = f\"{prompt} [{_format_default(default)}]\"\n return f\"{prompt}{suffix}\"\n\n\ndef _format_default(default):\n if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, \"name\"):\n return default.name\n\n return default\n\n\ndef prompt(\n text,\n default=None,\n hide_input=False,\n confirmation_prompt=False,\n type=None,\n value_proc=None,\n prompt_suffix=\": \",\n show_default=True,\n err=False,\n show_choices=True,\n):\n \"\"\"Prompts a user for input. This is a convenience function that can\n be used to prompt a user for input later.\n\n If the user aborts the input by sending a interrupt signal, this\n function will catch it and raise a :exc:`Abort` exception.\n\n .. versionadded:: 7.0\n Added the show_choices parameter.\n\n .. versionadded:: 6.0\n Added unicode support for cmd.exe on Windows.\n\n .. versionadded:: 4.0\n Added the `err` parameter.\n\n :param text: the text to show for the prompt.\n :param default: the default value to use if no input happens. 
If this\n is not given it will prompt until it's aborted.\n :param hide_input: if this is set to true then the input value will\n be hidden.\n :param confirmation_prompt: asks for confirmation for the value.\n :param type: the type to use to check the value against.\n :param value_proc: if this parameter is provided it's a function that\n is invoked instead of the type conversion to\n convert a value.\n :param prompt_suffix: a suffix that should be added to the prompt.\n :param show_default: shows or hides the default value in the prompt.\n :param err: if set to true the file defaults to ``stderr`` instead of\n ``stdout``, the same as with echo.\n :param show_choices: Show or hide choices if the passed type is a Choice.\n For example if type is a Choice of either day or week,\n show_choices is true and text is \"Group by\" then the\n prompt will be \"Group by (day, week): \".\n \"\"\"\n result = None\n\n def prompt_func(text):\n f = hidden_prompt_func if hide_input else visible_prompt_func\n try:\n # Write the prompt separately so that we get nice\n # coloring through colorama on Windows\n echo(text, nl=False, err=err)\n return f(\"\")\n except (KeyboardInterrupt, EOFError):\n # getpass doesn't print a newline if the user aborts input with ^C.\n # Allegedly this behavior is inherited from getpass(3).\n # A doc bug has been filed at https://bugs.python.org/issue24711\n if hide_input:\n echo(None, err=err)\n raise Abort()\n\n if value_proc is None:\n value_proc = convert_type(type, default)\n\n prompt = _build_prompt(\n text, prompt_suffix, show_default, default, show_choices, type\n )\n\n while 1:\n while 1:\n value = prompt_func(prompt)\n if value:\n break\n elif default is not None:\n if isinstance(value_proc, Path):\n # validate Path default value(exists, dir_okay etc.)\n value = default\n break\n return default\n try:\n result = value_proc(value)\n except UsageError as e:\n if hide_input:\n echo(\"Error: the value you entered was invalid\", err=err)\n else:\n echo(f\"Error: {e.message}\", err=err) # noqa: B306\n continue\n if not confirmation_prompt:\n return result\n while 1:\n value2 = prompt_func(\"Repeat for confirmation: \")\n if value2:\n break\n if value == value2:\n return result\n echo(\"Error: the two entered values do not match\", err=err)\n\n\ndef confirm(\n text, default=False, abort=False, prompt_suffix=\": \", show_default=True, err=False\n):\n \"\"\"Prompts for confirmation (yes/no question).\n\n If the user aborts the input by sending a interrupt signal this\n function will catch it and raise a :exc:`Abort` exception.\n\n .. 
versionadded:: 4.0\n Added the `err` parameter.\n\n :param text: the question to ask.\n :param default: the default for the prompt.\n :param abort: if this is set to `True` a negative answer aborts the\n exception by raising :exc:`Abort`.\n :param prompt_suffix: a suffix that should be added to the prompt.\n :param show_default: shows or hides the default value in the prompt.\n :param err: if set to true the file defaults to ``stderr`` instead of\n ``stdout``, the same as with echo.\n \"\"\"\n prompt = _build_prompt(\n text, prompt_suffix, show_default, \"Y/n\" if default else \"y/N\"\n )\n while 1:\n try:\n # Write the prompt separately so that we get nice\n # coloring through colorama on Windows\n echo(prompt, nl=False, err=err)\n value = visible_prompt_func(\"\").lower().strip()\n except (KeyboardInterrupt, EOFError):\n raise Abort()\n if value in (\"y\", \"yes\"):\n rv = True\n elif value in (\"n\", \"no\"):\n rv = False\n elif value == \"\":\n rv = default\n else:\n echo(\"Error: invalid input\", err=err)\n continue\n break\n if abort and not rv:\n raise Abort()\n return rv\n\n\ndef get_terminal_size():\n \"\"\"Returns the current size of the terminal as tuple in the form\n ``(width, height)`` in columns and rows.\n \"\"\"\n import shutil\n\n if hasattr(shutil, \"get_terminal_size\"):\n return shutil.get_terminal_size()\n\n # We provide a sensible default for get_winterm_size() when being invoked\n # inside a subprocess. Without this, it would not provide a useful input.\n if get_winterm_size is not None:\n size = get_winterm_size()\n if size == (0, 0):\n return (79, 24)\n else:\n return size\n\n def ioctl_gwinsz(fd):\n try:\n import fcntl\n import termios\n\n cr = struct.unpack(\"hh\", fcntl.ioctl(fd, termios.TIOCGWINSZ, \"1234\"))\n except Exception:\n return\n return cr\n\n cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)\n if not cr:\n try:\n fd = os.open(os.ctermid(), os.O_RDONLY)\n try:\n cr = ioctl_gwinsz(fd)\n finally:\n os.close(fd)\n except Exception:\n pass\n if not cr or not cr[0] or not cr[1]:\n cr = (os.environ.get(\"LINES\", 25), os.environ.get(\"COLUMNS\", DEFAULT_COLUMNS))\n return int(cr[1]), int(cr[0])\n\n\ndef echo_via_pager(text_or_generator, color=None):\n \"\"\"This function takes a text and shows it via an environment specific\n pager on stdout.\n\n .. versionchanged:: 3.0\n Added the `color` flag.\n\n :param text_or_generator: the text to page, or alternatively, a\n generator emitting the text to page.\n :param color: controls if the pager supports ANSI colors or not. The\n default is autodetection.\n \"\"\"\n color = resolve_color_default(color)\n\n if inspect.isgeneratorfunction(text_or_generator):\n i = text_or_generator()\n elif isinstance(text_or_generator, str):\n i = [text_or_generator]\n else:\n i = iter(text_or_generator)\n\n # convert every element of i to a text type if necessary\n text_generator = (el if isinstance(el, str) else str(el) for el in i)\n\n from ._termui_impl import pager\n\n return pager(itertools.chain(text_generator, \"\\n\"), color)\n\n\ndef progressbar(\n iterable=None,\n length=None,\n label=None,\n show_eta=True,\n show_percent=None,\n show_pos=False,\n item_show_func=None,\n fill_char=\"#\",\n empty_char=\"-\",\n bar_template=\"%(label)s [%(bar)s] %(info)s\",\n info_sep=\" \",\n width=36,\n file=None,\n color=None,\n):\n \"\"\"This function creates an iterable context manager that can be used\n to iterate over something while showing a progress bar. 
It will\n either iterate over the `iterable` or `length` items (that are counted\n up). While iteration happens, this function will print a rendered\n progress bar to the given `file` (defaults to stdout) and will attempt\n to calculate remaining time and more. By default, this progress bar\n will not be rendered if the file is not a terminal.\n\n The context manager creates the progress bar. When the context\n manager is entered the progress bar is already created. With every\n iteration over the progress bar, the iterable passed to the bar is\n advanced and the bar is updated. When the context manager exits,\n a newline is printed and the progress bar is finalized on screen.\n\n Note: The progress bar is currently designed for use cases where the\n total progress can be expected to take at least several seconds.\n Because of this, the ProgressBar class object won't display\n progress that is considered too fast, and progress where the time\n between steps is less than a second.\n\n No printing must happen or the progress bar will be unintentionally\n destroyed.\n\n Example usage::\n\n with progressbar(items) as bar:\n for item in bar:\n do_something_with(item)\n\n Alternatively, if no iterable is specified, one can manually update the\n progress bar through the `update()` method instead of directly\n iterating over the progress bar. The update method accepts the number\n of steps to increment the bar with::\n\n with progressbar(length=chunks.total_bytes) as bar:\n for chunk in chunks:\n process_chunk(chunk)\n bar.update(chunks.bytes)\n\n The ``update()`` method also takes an optional value specifying the\n ``current_item`` at the new position. This is useful when used\n together with ``item_show_func`` to customize the output for each\n manual step::\n\n with click.progressbar(\n length=total_size,\n label='Unzipping archive',\n item_show_func=lambda a: a.filename\n ) as bar:\n for archive in zip_file:\n archive.extract()\n bar.update(archive.size, archive)\n\n .. versionadded:: 2.0\n\n .. versionadded:: 4.0\n Added the `color` parameter. Added a `update` method to the\n progressbar object.\n\n :param iterable: an iterable to iterate over. If not provided the length\n is required.\n :param length: the number of items to iterate over. By default the\n progressbar will attempt to ask the iterator about its\n length, which might or might not work. If an iterable is\n also provided this parameter can be used to override the\n length. If an iterable is not provided the progress bar\n will iterate over a range of that length.\n :param label: the label to show next to the progress bar.\n :param show_eta: enables or disables the estimated time display. This is\n automatically disabled if the length cannot be\n determined.\n :param show_percent: enables or disables the percentage display. The\n default is `True` if the iterable has a length or\n `False` if not.\n :param show_pos: enables or disables the absolute position display. The\n default is `False`.\n :param item_show_func: a function called with the current item which\n can return a string to show the current item\n next to the progress bar. 
Note that the current\n item can be `None`!\n :param fill_char: the character to use to show the filled part of the\n progress bar.\n :param empty_char: the character to use to show the non-filled part of\n the progress bar.\n :param bar_template: the format string to use as template for the bar.\n The parameters in it are ``label`` for the label,\n ``bar`` for the progress bar and ``info`` for the\n info section.\n :param info_sep: the separator between multiple info items (eta etc.)\n :param width: the width of the progress bar in characters, 0 means full\n terminal width\n :param file: the file to write to. If this is not a terminal then\n only the label is printed.\n :param color: controls if the terminal supports ANSI colors or not. The\n default is autodetection. This is only needed if ANSI\n codes are included anywhere in the progress bar output\n which is not the case by default.\n \"\"\"\n from ._termui_impl import ProgressBar\n\n color = resolve_color_default(color)\n return ProgressBar(\n iterable=iterable,\n length=length,\n show_eta=show_eta,\n show_percent=show_percent,\n show_pos=show_pos,\n item_show_func=item_show_func,\n fill_char=fill_char,\n empty_char=empty_char,\n bar_template=bar_template,\n info_sep=info_sep,\n file=file,\n label=label,\n width=width,\n color=color,\n )\n\n\ndef clear():\n \"\"\"Clears the terminal screen. This will have the effect of clearing\n the whole visible space of the terminal and moving the cursor to the\n top left. This does not do anything if not connected to a terminal.\n\n .. versionadded:: 2.0\n \"\"\"\n if not isatty(sys.stdout):\n return\n # If we're on Windows and we don't have colorama available, then we\n # clear the screen by shelling out. Otherwise we can use an escape\n # sequence.\n if WIN:\n os.system(\"cls\")\n else:\n sys.stdout.write(\"\\033[2J\\033[1;1H\")\n\n\ndef style(\n text,\n fg=None,\n bg=None,\n bold=None,\n dim=None,\n underline=None,\n blink=None,\n reverse=None,\n reset=True,\n):\n \"\"\"Styles a text with ANSI styles and returns the new string. By\n default the styling is self contained which means that at the end\n of the string a reset code is issued. This can be prevented by\n passing ``reset=False``.\n\n Examples::\n\n click.echo(click.style('Hello World!', fg='green'))\n click.echo(click.style('ATTENTION!', blink=True))\n click.echo(click.style('Some things', reverse=True, fg='cyan'))\n\n Supported color names:\n\n * ``black`` (might be a gray)\n * ``red``\n * ``green``\n * ``yellow`` (might be an orange)\n * ``blue``\n * ``magenta``\n * ``cyan``\n * ``white`` (might be light gray)\n * ``bright_black``\n * ``bright_red``\n * ``bright_green``\n * ``bright_yellow``\n * ``bright_blue``\n * ``bright_magenta``\n * ``bright_cyan``\n * ``bright_white``\n * ``reset`` (reset the color code only)\n\n .. versionadded:: 2.0\n\n .. versionadded:: 7.0\n Added support for bright colors.\n\n :param text: the string to style with ansi codes.\n :param fg: if provided this will become the foreground color.\n :param bg: if provided this will become the background color.\n :param bold: if provided this will enable or disable bold mode.\n :param dim: if provided this will enable or disable dim mode. 
This is\n badly supported.\n :param underline: if provided this will enable or disable underline.\n :param blink: if provided this will enable or disable blinking.\n :param reverse: if provided this will enable or disable inverse\n rendering (foreground becomes background and the\n other way round).\n :param reset: by default a reset-all code is added at the end of the\n string which means that styles do not carry over. This\n can be disabled to compose styles.\n \"\"\"\n bits = []\n if fg:\n try:\n bits.append(f\"\\033[{_ansi_colors[fg]}m\")\n except KeyError:\n raise TypeError(f\"Unknown color {fg!r}\")\n if bg:\n try:\n bits.append(f\"\\033[{_ansi_colors[bg] + 10}m\")\n except KeyError:\n raise TypeError(f\"Unknown color {bg!r}\")\n if bold is not None:\n bits.append(f\"\\033[{1 if bold else 22}m\")\n if dim is not None:\n bits.append(f\"\\033[{2 if dim else 22}m\")\n if underline is not None:\n bits.append(f\"\\033[{4 if underline else 24}m\")\n if blink is not None:\n bits.append(f\"\\033[{5 if blink else 25}m\")\n if reverse is not None:\n bits.append(f\"\\033[{7 if reverse else 27}m\")\n bits.append(text)\n if reset:\n bits.append(_ansi_reset_all)\n return \"\".join(bits)\n\n\ndef unstyle(text):\n \"\"\"Removes ANSI styling information from a string. Usually it's not\n necessary to use this function as Click's echo function will\n automatically remove styling if necessary.\n\n .. versionadded:: 2.0\n\n :param text: the text to remove style information from.\n \"\"\"\n return strip_ansi(text)\n\n\ndef secho(message=None, file=None, nl=True, err=False, color=None, **styles):\n \"\"\"This function combines :func:`echo` and :func:`style` into one\n call. As such the following two calls are the same::\n\n click.secho('Hello World!', fg='green')\n click.echo(click.style('Hello World!', fg='green'))\n\n All keyword arguments are forwarded to the underlying functions\n depending on which one they go with.\n\n .. versionadded:: 2.0\n \"\"\"\n if message is not None:\n message = style(message, **styles)\n return echo(message, file=file, nl=nl, err=err, color=color)\n\n\ndef edit(\n text=None, editor=None, env=None, require_save=True, extension=\".txt\", filename=None\n):\n r\"\"\"Edits the given text in the defined editor. If an editor is given\n (should be the full path to the executable but the regular operating\n system search path is used for finding the executable) it overrides\n the detected editor. Optionally, some environment variables can be\n used. If the editor is closed without changes, `None` is returned. In\n case a file is edited directly the return value is always `None` and\n `require_save` and `extension` are ignored.\n\n If the editor cannot be opened a :exc:`UsageError` is raised.\n\n Note for Windows: to simplify cross-platform usage, the newlines are\n automatically converted from POSIX to Windows and vice versa. As such,\n the message here will have ``\\n`` as newline markers.\n\n :param text: the text to edit.\n :param editor: optionally the editor to use. Defaults to automatic\n detection.\n :param env: environment variables to forward to the editor.\n :param require_save: if this is true, then not saving in the editor\n will make the return value become `None`.\n :param extension: the extension to tell the editor about. This defaults\n to `.txt` but changing this might change syntax\n highlighting.\n :param filename: if provided it will edit this file instead of the\n provided text contents. 
It will not use a temporary\n file as an indirection in that case.\n \"\"\"\n from ._termui_impl import Editor\n\n editor = Editor(\n editor=editor, env=env, require_save=require_save, extension=extension\n )\n if filename is None:\n return editor.edit(text)\n editor.edit_file(filename)\n\n\ndef launch(url, wait=False, locate=False):\n \"\"\"This function launches the given URL (or filename) in the default\n viewer application for this file type. If this is an executable, it\n might launch the executable in a new session. The return value is\n the exit code of the launched application. Usually, ``0`` indicates\n success.\n\n Examples::\n\n click.launch('https://click.palletsprojects.com/')\n click.launch('/my/downloaded/file', locate=True)\n\n .. versionadded:: 2.0\n\n :param url: URL or filename of the thing to launch.\n :param wait: waits for the program to stop.\n :param locate: if this is set to `True` then instead of launching the\n application associated with the URL it will attempt to\n launch a file manager with the file located. This\n might have weird effects if the URL does not point to\n the filesystem.\n \"\"\"\n from ._termui_impl import open_url\n\n return open_url(url, wait=wait, locate=locate)\n\n\n# If this is provided, getchar() calls into this instead. This is used\n# for unittesting purposes.\n_getchar = None\n\n\ndef getchar(echo=False):\n \"\"\"Fetches a single character from the terminal and returns it. This\n will always return a unicode character and under certain rare\n circumstances this might return more than one character. The\n situations which more than one character is returned is when for\n whatever reason multiple characters end up in the terminal buffer or\n standard input was not actually a terminal.\n\n Note that this will always read from the terminal, even if something\n is piped into the standard input.\n\n Note for Windows: in rare cases when typing non-ASCII characters, this\n function might wait for a second character and then return both at once.\n This is because certain Unicode characters look like special-key markers.\n\n .. versionadded:: 2.0\n\n :param echo: if set to `True`, the character read will also show up on\n the terminal. The default is to not show it.\n \"\"\"\n f = _getchar\n if f is None:\n from ._termui_impl import getchar as f\n return f(echo)\n\n\ndef raw_terminal():\n from ._termui_impl import raw_terminal as f\n\n return f()\n\n\ndef pause(info=\"Press any key to continue ...\", err=False):\n \"\"\"This command stops execution and waits for the user to press any\n key to continue. This is similar to the Windows batch \"pause\"\n command. If the program is not run through a terminal, this command\n will instead do nothing.\n\n .. versionadded:: 2.0\n\n .. versionadded:: 4.0\n Added the `err` parameter.\n\n :param info: the info string to print before pausing.\n :param err: if set to message goes to ``stderr`` instead of\n ``stdout``, the same as with echo.\n \"\"\"\n if not isatty(sys.stdin) or not isatty(sys.stdout):\n return\n try:\n if info:\n echo(info, nl=False, err=err)\n try:\n getchar()\n except (KeyboardInterrupt, EOFError):\n pass\n finally:\n if info:\n echo(err=err)\n", "path": "src/click/termui.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index efc39145c..5721fd734 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -16,6 +16,8 @@ Unreleased - ``version_option`` uses ``importlib.metadata`` (or the ``importlib_metadata`` backport) instead of ``pkg_resources``. :issue:`1582` +- If validation fails for a prompt with ``hide_input=True``, the value + is not shown in the error message. :issue:`1460` Version 7.1.2 diff --git a/src/click/termui.py b/src/click/termui.py index a1bdf2ab8..fd3d91576 100644 --- a/src/click/termui.py +++ b/src/click/termui.py @@ -153,7 +153,10 @@ def prompt_func(text): try: result = value_proc(value) except UsageError as e: - echo(f"Error: {e.message}", err=err) # noqa: B306 + if hide_input: + echo("Error: the value you entered was invalid", err=err) + else: + echo(f"Error: {e.message}", err=err) # noqa: B306 continue if not confirmation_prompt: return result
click.prompt(type=int, hide_input=True) outputs rejected input
Despite asking `click.prompt()` to hide input, it will still output the input if it rejects it. I get the same behavior for `7.0` from PyPI, `7.x` from Git, and `master` from Git.

```bash
altendky@p1:~$ venv/bin/python -c 'import click; click.prompt("prompt", type=int, hide_input=True)'
prompt: 
Error: wait... i get to see this? is not a valid integer
prompt: 
```

If you specify a type (let's say you are inputting a numeric PIN) then the same can happen with `click.password_option()`.

```python3
import click

@click.command()
@click.password_option(type=int)
def cli(password):
    pass

cli()
```

```bash
altendky@p1:~$ venv/bin/python x.py
Password: 
Error: lkjsaf is not a valid integer
Password: 
```

The workaround for `click.prompt()`, I guess, is to not specify a type and to implement the rejection manually.

If there is agreement that this is an issue that should be fixed then I can try to put together a PR for it.
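A minimal, self-contained sketch of the behavior the patch introduces. This is an illustration only, not Click's actual internals; it uses `getpass` to stand in for a hidden prompt:

```python
import getpass

def prompt_hidden_int(text="prompt"):
    """Retry loop mirroring the fix: on a bad value, print a generic
    error instead of echoing the hidden input back to the terminal."""
    while True:
        raw = getpass.getpass(f"{text}: ")
        try:
            return int(raw)
        except ValueError:
            # hide_input semantics: never reveal what was typed
            print("Error: the value you entered was invalid")
```

With the patched Click, `click.prompt("prompt", type=int, hide_input=True)` behaves the same way: the rejected value no longer appears in the error message.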
chainer__chainer-271
[ { "content": "import numpy\nimport six\n\nfrom chainer import cuda\n\n\nclass FunctionSet(object):\n\n \"\"\"Set of objects with ``parameters`` and ``gradients`` properties.\n\n :class:`FunctionSet` is useful to collect parameters and gradients of\n multiple parameterized :class:`Function` objects. :class:`FunctionSet`\n itself also implements :attr:`~FunctionSet.parameters` and\n :attr:`~FunctionSet.gradients`, so it can be nested in another\n :class:`FunctionSet` object.\n\n Function registration is done by just adding an attribute to\n :class:`FunctionSet` object.\n\n \"\"\"\n\n def __init__(self, **functions):\n \"\"\"Initializes the function set by given functions.\n\n Args:\n **functions: ``dict`` of ``str`` key and :class:`Function` values.\n The key-value pairs are just set to the :class:`FunctionSet`\n object as attributes.\n\n \"\"\"\n for name, func in six.iteritems(functions):\n setattr(self, name, func)\n\n def collect_parameters(self):\n \"\"\"Returns a tuple of parameters and gradients.\n\n Returns:\n Tuple (pair) of two tuples. The first element is a tuple of\n parameter arrays, and the second is a tuple of gradient arrays.\n\n \"\"\"\n return self.parameters, self.gradients\n\n def to_gpu(self, device=None):\n \"\"\"Migrates all parameters and gradients onto GPU.\n\n This method calls ``to_gpu`` method of each registered object.\n\n Args:\n device (int or :class:`pycuda.driver.Device` or ``None``): Device\n ID of GPU. If ``None`` is given, it uses the current device.\n\n Returns:\n self\n\n \"\"\"\n for func in six.itervalues(self.__dict__):\n func.to_gpu(device=device)\n return self\n\n def to_cpu(self):\n \"\"\"Migrates all parameters and gradients onto CPU.\n\n This method calls ``to_cpu`` method of each registered object.\n\n Returns:\n self\n\n \"\"\"\n for func in six.itervalues(self.__dict__):\n func.to_cpu()\n return self\n\n def copy_parameters_from(self, params):\n \"\"\"Copies parameters from another source without reallocation.\n\n Args:\n params (Iterable): Iterable of parameter arrays.\n\n \"\"\"\n for dst, src in zip(self.parameters, params):\n if isinstance(dst, numpy.ndarray):\n if isinstance(src, numpy.ndarray):\n dst.copy(src)\n else:\n src.get(dst)\n elif isinstance(src, numpy.ndarray):\n dst.set(src)\n else:\n cuda.copy(src, out=dst)\n\n @property\n def parameters(self):\n \"\"\"Tuple of parameter arrays of all registered functions.\n\n The order of parameters is consistent with :meth:`gradients` property.\n\n \"\"\"\n return sum((func.parameters for _, func in self._get_sorted_funcs()),\n ())\n\n @parameters.setter\n def parameters(self, params):\n param_iter = iter(params)\n for _, func in self._get_sorted_funcs():\n func.parameters = param_iter\n\n @property\n def gradients(self):\n \"\"\"Tuple of gradient arrays of all registered functions.\n\n The order of gradients is consistent with :meth:`parameters` property.\n\n \"\"\"\n return sum((func.gradients for _, func in self._get_sorted_funcs()),\n ())\n\n @gradients.setter\n def gradients(self, grads):\n grad_iter = iter(grads)\n for _, func in self._get_sorted_funcs():\n func.gradients = grad_iter\n\n def _get_sorted_funcs(self):\n return sorted(six.iteritems(self.__dict__))\n", "path": "chainer/function_set.py" } ]
[ { "content": "import numpy\nimport six\n\nfrom chainer import cuda\n\n\nclass FunctionSet(object):\n\n \"\"\"Set of objects with ``parameters`` and ``gradients`` properties.\n\n :class:`FunctionSet` is useful to collect parameters and gradients of\n multiple parameterized :class:`Function` objects. :class:`FunctionSet`\n itself also implements :attr:`~FunctionSet.parameters` and\n :attr:`~FunctionSet.gradients`, so it can be nested in another\n :class:`FunctionSet` object.\n\n Function registration is done by just adding an attribute to\n :class:`FunctionSet` object.\n\n \"\"\"\n\n def __init__(self, **functions):\n \"\"\"Initializes the function set by given functions.\n\n Args:\n **functions: ``dict`` of ``str`` key and :class:`Function` values.\n The key-value pairs are just set to the :class:`FunctionSet`\n object as attributes.\n\n \"\"\"\n for name, func in six.iteritems(functions):\n setattr(self, name, func)\n\n def collect_parameters(self):\n \"\"\"Returns a tuple of parameters and gradients.\n\n Returns:\n Tuple (pair) of two tuples. The first element is a tuple of\n parameter arrays, and the second is a tuple of gradient arrays.\n\n \"\"\"\n return self.parameters, self.gradients\n\n def to_gpu(self, device=None):\n \"\"\"Migrates all parameters and gradients onto GPU.\n\n This method calls ``to_gpu`` method of each registered object.\n\n Args:\n device (int or :class:`pycuda.driver.Device` or ``None``): Device\n ID of GPU. If ``None`` is given, it uses the current device.\n\n Returns:\n self\n\n \"\"\"\n for func in six.itervalues(self.__dict__):\n func.to_gpu(device=device)\n return self\n\n def to_cpu(self):\n \"\"\"Migrates all parameters and gradients onto CPU.\n\n This method calls ``to_cpu`` method of each registered object.\n\n Returns:\n self\n\n \"\"\"\n for func in six.itervalues(self.__dict__):\n func.to_cpu()\n return self\n\n def copy_parameters_from(self, params):\n \"\"\"Copies parameters from another source without reallocation.\n\n Args:\n params (Iterable): Iterable of parameter arrays.\n\n \"\"\"\n for dst, src in zip(self.parameters, params):\n if isinstance(dst, numpy.ndarray):\n if isinstance(src, numpy.ndarray):\n numpy.copyto(dst, src)\n else:\n src.get(dst)\n elif isinstance(src, numpy.ndarray):\n dst.set(src)\n else:\n cuda.copy(src, out=dst)\n\n @property\n def parameters(self):\n \"\"\"Tuple of parameter arrays of all registered functions.\n\n The order of parameters is consistent with :meth:`gradients` property.\n\n \"\"\"\n return sum((func.parameters for _, func in self._get_sorted_funcs()),\n ())\n\n @parameters.setter\n def parameters(self, params):\n param_iter = iter(params)\n for _, func in self._get_sorted_funcs():\n func.parameters = param_iter\n\n @property\n def gradients(self):\n \"\"\"Tuple of gradient arrays of all registered functions.\n\n The order of gradients is consistent with :meth:`parameters` property.\n\n \"\"\"\n return sum((func.gradients for _, func in self._get_sorted_funcs()),\n ())\n\n @gradients.setter\n def gradients(self, grads):\n grad_iter = iter(grads)\n for _, func in self._get_sorted_funcs():\n func.gradients = grad_iter\n\n def _get_sorted_funcs(self):\n return sorted(six.iteritems(self.__dict__))\n", "path": "chainer/function_set.py" } ]
diff --git a/chainer/function_set.py b/chainer/function_set.py index ca4ac1ab3466..6c1d5a6e37fb 100644 --- a/chainer/function_set.py +++ b/chainer/function_set.py @@ -81,7 +81,7 @@ def copy_parameters_from(self, params): for dst, src in zip(self.parameters, params): if isinstance(dst, numpy.ndarray): if isinstance(src, numpy.ndarray): - dst.copy(src) + numpy.copyto(dst, src) else: src.get(dst) elif isinstance(src, numpy.ndarray): diff --git a/tests/test_function_set.py b/tests/test_function_set.py index d4596431adac..0db2de390494 100644 --- a/tests/test_function_set.py +++ b/tests/test_function_set.py @@ -102,5 +102,41 @@ def test_pickle_gpu(self): fs2.to_cpu() self.check_equal_fs(self.fs, fs2) + def check_copy_parameters_from(self, src_gpu=False, dst_gpu=False): + aW = np.random.uniform(-1, 1, (2, 3)).astype(np.float32) + ab = np.random.uniform(-1, 1, (2,)).astype(np.float32) + bW = np.random.uniform(-1, 1, (2, 3)).astype(np.float32) + bb = np.random.uniform(-1, 1, (2,)).astype(np.float32) + params = [aW.copy(), ab.copy(), bW.copy(), bb.copy()] + + if dst_gpu: + self.fs.to_gpu() + + if src_gpu: + params = map(cuda.to_gpu, params) + + self.fs.copy_parameters_from(params) + self.fs.to_cpu() + + self.assertTrue((self.fs.a.W == aW).all()) + self.assertTrue((self.fs.a.b == ab).all()) + self.assertTrue((self.fs.b.W == bW).all()) + self.assertTrue((self.fs.b.b == bb).all()) + + def test_copy_parameters_from_cpu_to_cpu(self): + self.check_copy_parameters_from(False, False) + + @attr.gpu + def test_copy_parameters_from_cpu_to_gpu(self): + self.check_copy_parameters_from(False, True) + + @attr.gpu + def test_copy_parameters_from_gpu_to_cpu(self): + self.check_copy_parameters_from(True, False) + + @attr.gpu + def test_copy_parameters_from_gpu_to_gpu(self): + self.check_copy_parameters_from(True, True) + testing.run_module(__name__, __file__)
FunctionSet.copy_parameters_from()
Hi all!
The code in `FunctionSet.copy_parameters_from()` does not work when `src` and `dst` are both numpy.ndarrays:

``` python
if isinstance(dst, numpy.ndarray):
    if isinstance(src, numpy.ndarray):
        dst.copy(src)  # this gives a ValueError
```

I think this should read

``` python
if isinstance(dst, numpy.ndarray):
    if isinstance(src, numpy.ndarray):
        numpy.copyto(dst, src)
```

My numpy.version.full_version is 1.9.2; the `copyto` function has existed since 1.7.0.

Cheers,
-r
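The reporter's point is easy to verify: `ndarray.copy()` takes an order flag (`'C'`/`'F'`), not a source array, so passing an array raises, while `numpy.copyto(dst, src)` performs the intended in-place copy. A minimal check:

```python
import numpy as np

dst = np.zeros((2, 3), dtype=np.float32)
src = np.random.uniform(-1, 1, (2, 3)).astype(np.float32)

try:
    dst.copy(src)  # copy() expects an order flag, not a source array
except (TypeError, ValueError) as exc:
    print("dst.copy(src) fails:", exc)

np.copyto(dst, src)  # copies src into dst without reallocating
assert (dst == src).all()
```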
Qiskit__qiskit-1024
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"Tools for compiling a batch of quantum circuits.\"\"\"\nfrom copy import deepcopy\nimport logging\nimport uuid\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy.sparse.csgraph as cs\n\nfrom qiskit.transpiler._transpilererror import TranspilerError\nfrom qiskit._qiskiterror import QISKitError\nfrom qiskit import QuantumCircuit\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.unroll import DagUnroller, DAGBackend, JsonBackend\nfrom qiskit.mapper import (Coupling, optimize_1q_gates, coupling_list2dict, swap_mapper,\n cx_cancellation, direction_mapper,\n remove_last_measurements, return_last_measurements)\nfrom qiskit.qobj import Qobj, QobjConfig, QobjExperiment, QobjItem, QobjHeader\nfrom ._parallel import parallel_map\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable=redefined-builtin\ndef compile(circuits, backend,\n config=None, basis_gates=None, coupling_map=None, initial_layout=None,\n shots=1024, max_credits=10, seed=None, qobj_id=None, hpc=None,\n pass_manager=None):\n \"\"\"Compile a list of circuits into a qobj.\n\n Args:\n circuits (QuantumCircuit or list[QuantumCircuit]): circuits to compile\n backend (BaseBackend): a backend to compile for\n config (dict): dictionary of parameters (e.g. noise) used by runner\n basis_gates (str): comma-separated basis gate set to compile to\n coupling_map (list): coupling map (perhaps custom) to target in mapping\n initial_layout (list): initial layout of qubits in mapping\n shots (int): number of repetitions of each circuit, for sampling\n max_credits (int): maximum credits to use\n seed (int): random seed for simulators\n qobj_id (int): identifier for the generated qobj\n hpc (dict): HPC simulator parameters\n pass_manager (PassManager): a pass_manager for the transpiler stage\n\n Returns:\n QobjExperiment: Experiment to be wrapped in a Qobj.\n\n Raises:\n TranspilerError: in case of bad compile options, e.g. the hpc options.\n \"\"\"\n if isinstance(circuits, QuantumCircuit):\n circuits = [circuits]\n\n # FIXME: THIS NEEDS TO BE CLEANED UP -- some things to decide for list of circuits:\n # 1. do all circuits have same coupling map?\n # 2. do all circuit have the same basis set?\n # 3. 
do they all have same registers etc?\n backend_conf = backend.configuration()\n backend_name = backend_conf['name']\n # Check for valid parameters for the experiments.\n if hpc is not None and \\\n not all(key in hpc for key in ('multi_shot_optimization', 'omp_num_threads')):\n raise TranspilerError('Unknown HPC parameter format!')\n basis_gates = basis_gates or backend_conf['basis_gates']\n coupling_map = coupling_map or backend_conf['coupling_map']\n\n # step 1: Making the list of dag circuits\n dags = _circuits_2_dags(circuits)\n\n # step 2: Transpile all the dags\n\n # FIXME: Work-around for transpiling multiple circuits with different qreg names.\n # Make compile take a list of initial_layouts.\n _initial_layout = initial_layout\n\n # Pick a good initial layout if coupling_map is not already satisfied\n # otherwise keep it as q[i]->q[i].\n # TODO: move this inside mapper pass.\n initial_layouts = []\n for dag in dags:\n if (initial_layout is None and not backend.configuration()['simulator']\n and not _matches_coupling_map(dag, coupling_map)):\n _initial_layout = _pick_best_layout(dag, backend)\n initial_layouts.append(_initial_layout)\n dags = _transpile_dags(dags, basis_gates=basis_gates, coupling_map=coupling_map,\n initial_layouts=initial_layouts, seed=seed,\n pass_manager=pass_manager)\n\n # step 3: Making a qobj\n qobj = _dags_2_qobj(dags, backend_name=backend_name,\n config=config, shots=shots, max_credits=max_credits,\n qobj_id=qobj_id, basis_gates=basis_gates,\n coupling_map=coupling_map, seed=seed)\n\n return qobj\n\n\ndef _circuits_2_dags(circuits):\n \"\"\"Convert a list of circuits into a list of dags.\n\n Args:\n circuits (list[QuantumCircuit]): circuit to compile\n\n Returns:\n list[DAGCircuit]: the dag representation of the circuits\n to be used in the transpiler\n \"\"\"\n dags = parallel_map(DAGCircuit.fromQuantumCircuit, circuits)\n return dags\n\n\ndef _transpile_dags(dags, basis_gates='u1,u2,u3,cx,id', coupling_map=None,\n initial_layouts=None, seed=None, pass_manager=None):\n \"\"\"Transform multiple dags through a sequence of passes.\n\n Args:\n dags (list[DAGCircuit]): dag circuits to transform\n basis_gates (str): a comma seperated string for the target basis gates\n coupling_map (list): A graph of coupling\n initial_layouts (list[dict]): A mapping of qubit to qubit for each dag\n seed (int): random seed for the swap mapper\n pass_manager (PassManager): pass manager instance for the tranpilation process\n If None, a default set of passes are run.\n Otherwise, the passes defined in it will run.\n If contains no passes in it, no dag transformations occur.\n\n Returns:\n list[DAGCircuit]: the dag circuits after going through transpilation\n\n Raises:\n TranspilerError: if the format is not valid.\n \"\"\"\n\n index = list(range(len(dags)))\n final_dags = parallel_map(_transpile_dags_parallel, index,\n task_args=(dags, initial_layouts),\n task_kwargs={'basis_gates': basis_gates,\n 'coupling_map': coupling_map,\n 'seed': seed,\n 'pass_manager': pass_manager})\n return final_dags\n\n\ndef _transpile_dags_parallel(idx, dags, initial_layouts, basis_gates='u1,u2,u3,cx,id',\n coupling_map=None, seed=None, pass_manager=None):\n \"\"\"Helper function for transpiling in parallel (if available).\n\n Args:\n idx (int): Index for dag of interest\n dags (list): List of dags\n initial_layouts (list): List of initial layouts\n basis_gates (str): a comma seperated string for the target basis gates\n coupling_map (list): A graph of coupling\n seed (int): random seed for the swap 
mapper\n pass_manager (PassManager): pass manager instance for the tranpilation process\n If None, a default set of passes are run.\n Otherwise, the passes defined in it will run.\n If contains no passes in it, no dag transformations occur.\n Returns:\n DAGCircuit: DAG circuit after going through transpilation.\n \"\"\"\n dag = dags[idx]\n initial_layout = initial_layouts[idx]\n final_dag, final_layout = transpile(\n dag,\n basis_gates=basis_gates,\n coupling_map=coupling_map,\n initial_layout=initial_layout,\n get_layout=True,\n seed=seed,\n pass_manager=pass_manager)\n final_dag.layout = [[k, v]\n for k, v in final_layout.items()] if final_layout else None\n return final_dag\n\n\ndef _dags_2_qobj(dags, backend_name, config=None, shots=None,\n max_credits=None, qobj_id=None, basis_gates=None, coupling_map=None,\n seed=None):\n \"\"\"Convert a list of dags into a qobj.\n\n Args:\n dags (list[DAGCircuit]): dags to compile\n backend_name (str): name of runner backend\n config (dict): dictionary of parameters (e.g. noise) used by runner\n shots (int): number of repetitions of each circuit, for sampling\n max_credits (int): maximum credits to use\n qobj_id (int): identifier for the generated qobj\n basis_gates (list[str])): basis gates for the experiment\n coupling_map (list): coupling map (perhaps custom) to target in mapping\n seed (int): random seed for simulators\n\n Returns:\n Qobj: the Qobj to be run on the backends\n \"\"\"\n # TODO: the following will be removed from qobj and thus removed here:\n # `basis_gates`, `coupling_map`\n\n # Step 1: create the Qobj, with empty experiments.\n # Copy the configuration: the values in `config` have preference\n qobj_config = deepcopy(config or {})\n # TODO: \"memory_slots\" is required by the qobj schema in the top-level\n # qobj.config, and is user-defined. At the moment is set to the maximum\n # number of *register* slots for the circuits, in order to have `measure`\n # behave properly until the transition is over; and each circuit stores\n # its memory_slots in its configuration.\n qobj_config.update({'shots': shots,\n 'max_credits': max_credits,\n 'memory_slots': 0})\n\n qobj = Qobj(qobj_id=qobj_id or str(uuid.uuid4()),\n config=QobjConfig(**qobj_config),\n experiments=[],\n header=QobjHeader(backend_name=backend_name))\n if seed:\n qobj.config.seed = seed\n\n qobj.experiments = parallel_map(_dags_2_qobj_parallel, dags,\n task_kwargs={'basis_gates': basis_gates,\n 'config': config,\n 'coupling_map': coupling_map})\n\n # Update the `memory_slots` value.\n # TODO: remove when `memory_slots` can be provided by the user.\n qobj.config.memory_slots = max(experiment.config.memory_slots for\n experiment in qobj.experiments)\n\n # Update the `n_qubits` global value.\n # TODO: num_qubits is not part of the qobj specification, but needed\n # for the simulator.\n qobj.config.n_qubits = max(experiment.config.n_qubits for\n experiment in qobj.experiments)\n\n return qobj\n\n\ndef _dags_2_qobj_parallel(dag, config=None, basis_gates=None,\n coupling_map=None):\n \"\"\"Helper function for dags to qobj in parallel (if available).\n\n Args:\n dag (DAGCircuit): DAG to compile\n config (dict): dictionary of parameters (e.g. 
noise) used by runner\n basis_gates (list[str])): basis gates for the experiment\n coupling_map (list): coupling map (perhaps custom) to target in mapping\n\n Returns:\n Qobj: Qobj to be run on the backends\n \"\"\"\n json_circuit = DagUnroller(dag, JsonBackend(dag.basis)).execute()\n # Step 3a: create the Experiment based on json_circuit\n experiment = QobjExperiment.from_dict(json_circuit)\n # Step 3b: populate the Experiment configuration and header\n experiment.header.name = dag.name\n # TODO: place in header or config?\n experiment_config = deepcopy(config or {})\n experiment_config.update({\n 'coupling_map': coupling_map,\n 'basis_gates': basis_gates,\n 'layout': dag.layout,\n 'memory_slots': sum(dag.cregs.values()),\n # TODO: `n_qubits` is not part of the qobj spec, but needed for the simulator.\n 'n_qubits': sum(dag.qregs.values())})\n experiment.config = QobjItem(**experiment_config)\n\n # set eval_symbols=True to evaluate each symbolic expression\n # TODO: after transition to qobj, we can drop this\n experiment.header.compiled_circuit_qasm = dag.qasm(\n qeflag=True, eval_symbols=True)\n # Step 3c: add the Experiment to the Qobj\n return experiment\n\n\ndef transpile(dag, basis_gates='u1,u2,u3,cx,id', coupling_map=None,\n initial_layout=None, get_layout=False,\n format='dag', seed=None, pass_manager=None):\n \"\"\"Transform a dag circuit into another dag circuit (transpile), through\n consecutive passes on the dag.\n\n Args:\n dag (DAGCircuit): dag circuit to transform via transpilation\n basis_gates (str): a comma seperated string for the target basis gates\n coupling_map (list): A graph of coupling::\n\n [\n [control0(int), target0(int)],\n [control1(int), target1(int)],\n ]\n\n eg. [[0, 2], [1, 2], [1, 3], [3, 4]}\n\n initial_layout (dict): A mapping of qubit to qubit::\n\n {\n (\"q\", start(int)): (\"q\", final(int)),\n ...\n }\n eg.\n {\n (\"q\", 0): (\"q\", 0),\n (\"q\", 1): (\"q\", 1),\n (\"q\", 2): (\"q\", 2),\n (\"q\", 3): (\"q\", 3)\n }\n get_layout (bool): flag for returning the final layout after mapping\n format (str): The target format of the compilation:\n {'dag', 'json', 'qasm'}\n seed (int): random seed for the swap mapper\n pass_manager (PassManager): pass manager instance for the tranpilation process\n If None, a default set of passes are run.\n Otherwise, the passes defined in it will run.\n If contains no passes in it, no dag transformations occur.\n\n Returns:\n DAGCircuit: transformed dag\n DAGCircuit, dict: transformed dag along with the final layout on backend qubits\n\n Raises:\n TranspilerError: if the format is not valid.\n \"\"\"\n # TODO: `basis_gates` will be removed after we have the unroller pass.\n # TODO: `coupling_map`, `initial_layout`, `get_layout`, `seed` removed after mapper pass.\n\n # TODO: move this to the mapper pass\n num_qubits = sum(dag.qregs.values())\n if num_qubits == 1 or coupling_map == \"all-to-all\":\n coupling_map = None\n\n final_layout = None\n\n if pass_manager:\n # run the passes specified by the pass manager\n for pass_ in pass_manager.passes():\n pass_.run(dag)\n else:\n # default set of passes\n # TODO: move each step here to a pass, and use a default passmanager below\n basis = basis_gates.split(',') if basis_gates else []\n dag_unroller = DagUnroller(dag, DAGBackend(basis))\n dag = dag_unroller.expand_gates()\n # if a coupling map is given compile to the map\n if coupling_map:\n logger.info(\"pre-mapping properties: %s\",\n dag.property_summary())\n # Insert swap gates\n coupling = 
Coupling(coupling_list2dict(coupling_map))\n removed_meas = remove_last_measurements(dag)\n logger.info(\"measurements moved: %s\", removed_meas)\n logger.info(\"initial layout: %s\", initial_layout)\n dag, final_layout, last_layout = swap_mapper(\n dag, coupling, initial_layout, trials=20, seed=seed)\n logger.info(\"final layout: %s\", final_layout)\n # Expand swaps\n dag_unroller = DagUnroller(dag, DAGBackend(basis))\n dag = dag_unroller.expand_gates()\n # Change cx directions\n dag = direction_mapper(dag, coupling)\n # Simplify cx gates\n cx_cancellation(dag)\n # Simplify single qubit gates\n dag = optimize_1q_gates(dag)\n return_last_measurements(dag, removed_meas,\n last_layout)\n logger.info(\"post-mapping properties: %s\",\n dag.property_summary())\n\n # choose output format\n # TODO: do we need all of these formats, or just the dag?\n if format == 'dag':\n compiled_circuit = dag\n elif format == 'json':\n # FIXME: JsonBackend is wrongly taking an ordered dict as basis, not list\n dag_unroller = DagUnroller(dag, JsonBackend(dag.basis))\n compiled_circuit = dag_unroller.execute()\n elif format == 'qasm':\n compiled_circuit = dag.qasm()\n else:\n raise TranspilerError('unrecognized circuit format')\n\n if get_layout:\n return compiled_circuit, final_layout\n return compiled_circuit\n\n\ndef _best_subset(backend, n_qubits):\n \"\"\"Computes the qubit mapping with the best\n connectivity.\n\n Parameters:\n backend (Qiskit.BaseBackend): A QISKit backend instance.\n n_qubits (int): Number of subset qubits to consider.\n\n Returns:\n ndarray: Array of qubits to use for best\n connectivity mapping.\n\n Raises:\n QISKitError: Wrong number of qubits given.\n \"\"\"\n if n_qubits == 1:\n return np.array([0])\n elif n_qubits <= 0:\n raise QISKitError('Number of qubits <= 0.')\n\n device_qubits = backend.configuration()['n_qubits']\n if n_qubits > device_qubits:\n raise QISKitError('Number of qubits greater than device.')\n\n cmap = np.asarray(backend.configuration()['coupling_map'])\n data = np.ones_like(cmap[:, 0])\n sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])),\n shape=(device_qubits, device_qubits)).tocsr()\n best = 0\n best_map = None\n # do bfs with each node as starting point\n for k in range(sp_cmap.shape[0]):\n bfs = cs.breadth_first_order(sp_cmap, i_start=k, directed=False,\n return_predecessors=False)\n\n connection_count = 0\n for i in range(n_qubits):\n node_idx = bfs[i]\n for j in range(sp_cmap.indptr[node_idx],\n sp_cmap.indptr[node_idx + 1]):\n node = sp_cmap.indices[j]\n for counter in range(n_qubits):\n if node == bfs[counter]:\n connection_count += 1\n break\n\n if connection_count > best:\n best = connection_count\n best_map = bfs[0:n_qubits]\n return best_map\n\n\ndef _matches_coupling_map(dag, coupling_map):\n \"\"\"Iterate over circuit gates to check if all multi-qubit couplings\n match the qubit coupling graph in the backend.\n\n Parameters:\n dag (DAGCircuit): DAG representation of circuit.\n coupling_map (list): Backend coupling map, represented as an adjacency list.\n\n Returns:\n bool: True if all gates readily fit the backend coupling graph.\n False if there's at least one gate that uses multiple qubits\n which does not match the backend couplings.\n \"\"\"\n match = True\n for _, data in dag.multi_graph.nodes(data=True):\n if data['type'] == 'op':\n gate_map = [qr[1] for qr in data['qargs']]\n if gate_map not in coupling_map:\n match = False\n break\n return match\n\n\ndef _pick_best_layout(dag, backend):\n \"\"\"Pick a convenient layout depending on the 
best matching qubit connectivity\n\n Parameters:\n dag (DAGCircuit): DAG representation of circuit.\n backend (BaseBackend) : The backend with the coupling_map for searching\n\n Returns:\n dict: A special ordered initial_layout\n\n \"\"\"\n num_qubits = sum(dag.qregs.values())\n best_sub = _best_subset(backend, num_qubits)\n layout = {}\n map_iter = 0\n for key, value in dag.qregs.items():\n for i in range(value):\n layout[(key, i)] = ('q', best_sub[map_iter])\n map_iter += 1\n return layout\n", "path": "qiskit/transpiler/_transpiler.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"Tools for compiling a batch of quantum circuits.\"\"\"\nfrom copy import deepcopy\nimport logging\nimport uuid\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy.sparse.csgraph as cs\n\nfrom qiskit.transpiler._transpilererror import TranspilerError\nfrom qiskit._qiskiterror import QISKitError\nfrom qiskit import QuantumCircuit\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.unroll import DagUnroller, DAGBackend, JsonBackend\nfrom qiskit.mapper import (Coupling, optimize_1q_gates, coupling_list2dict, swap_mapper,\n cx_cancellation, direction_mapper,\n remove_last_measurements, return_last_measurements)\nfrom qiskit.qobj import Qobj, QobjConfig, QobjExperiment, QobjItem, QobjHeader\nfrom ._parallel import parallel_map\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable=redefined-builtin\ndef compile(circuits, backend,\n config=None, basis_gates=None, coupling_map=None, initial_layout=None,\n shots=1024, max_credits=10, seed=None, qobj_id=None, hpc=None,\n pass_manager=None):\n \"\"\"Compile a list of circuits into a qobj.\n\n Args:\n circuits (QuantumCircuit or list[QuantumCircuit]): circuits to compile\n backend (BaseBackend): a backend to compile for\n config (dict): dictionary of parameters (e.g. noise) used by runner\n basis_gates (str): comma-separated basis gate set to compile to\n coupling_map (list): coupling map (perhaps custom) to target in mapping\n initial_layout (list): initial layout of qubits in mapping\n shots (int): number of repetitions of each circuit, for sampling\n max_credits (int): maximum credits to use\n seed (int): random seed for simulators\n qobj_id (int): identifier for the generated qobj\n hpc (dict): HPC simulator parameters\n pass_manager (PassManager): a pass_manager for the transpiler stage\n\n Returns:\n QobjExperiment: Experiment to be wrapped in a Qobj.\n\n Raises:\n TranspilerError: in case of bad compile options, e.g. the hpc options.\n \"\"\"\n if isinstance(circuits, QuantumCircuit):\n circuits = [circuits]\n\n # FIXME: THIS NEEDS TO BE CLEANED UP -- some things to decide for list of circuits:\n # 1. do all circuits have same coupling map?\n # 2. do all circuit have the same basis set?\n # 3. 
do they all have same registers etc?\n backend_conf = backend.configuration()\n backend_name = backend_conf['name']\n # Check for valid parameters for the experiments.\n if hpc is not None and \\\n not all(key in hpc for key in ('multi_shot_optimization', 'omp_num_threads')):\n raise TranspilerError('Unknown HPC parameter format!')\n basis_gates = basis_gates or backend_conf['basis_gates']\n coupling_map = coupling_map or backend_conf['coupling_map']\n\n # step 1: Making the list of dag circuits\n dags = _circuits_2_dags(circuits)\n\n # step 2: Transpile all the dags\n\n # FIXME: Work-around for transpiling multiple circuits with different qreg names.\n # Make compile take a list of initial_layouts.\n _initial_layout = initial_layout\n\n # Pick a good initial layout if coupling_map is not already satisfied\n # otherwise keep it as q[i]->q[i].\n # TODO: move this inside mapper pass.\n initial_layouts = []\n for dag in dags:\n if (initial_layout is None and not backend.configuration()['simulator']\n and not _matches_coupling_map(dag, coupling_map)):\n _initial_layout = _pick_best_layout(dag, backend)\n initial_layouts.append(_initial_layout)\n dags = _transpile_dags(dags, basis_gates=basis_gates, coupling_map=coupling_map,\n initial_layouts=initial_layouts, seed=seed,\n pass_manager=pass_manager)\n\n # step 3: Making a qobj\n qobj = _dags_2_qobj(dags, backend_name=backend_name,\n config=config, shots=shots, max_credits=max_credits,\n qobj_id=qobj_id, basis_gates=basis_gates,\n coupling_map=coupling_map, seed=seed)\n\n return qobj\n\n\ndef _circuits_2_dags(circuits):\n \"\"\"Convert a list of circuits into a list of dags.\n\n Args:\n circuits (list[QuantumCircuit]): circuit to compile\n\n Returns:\n list[DAGCircuit]: the dag representation of the circuits\n to be used in the transpiler\n \"\"\"\n dags = parallel_map(DAGCircuit.fromQuantumCircuit, circuits)\n return dags\n\n\ndef _transpile_dags(dags, basis_gates='u1,u2,u3,cx,id', coupling_map=None,\n initial_layouts=None, seed=None, pass_manager=None):\n \"\"\"Transform multiple dags through a sequence of passes.\n\n Args:\n dags (list[DAGCircuit]): dag circuits to transform\n basis_gates (str): a comma seperated string for the target basis gates\n coupling_map (list): A graph of coupling\n initial_layouts (list[dict]): A mapping of qubit to qubit for each dag\n seed (int): random seed for the swap mapper\n pass_manager (PassManager): pass manager instance for the tranpilation process\n If None, a default set of passes are run.\n Otherwise, the passes defined in it will run.\n If contains no passes in it, no dag transformations occur.\n\n Returns:\n list[DAGCircuit]: the dag circuits after going through transpilation\n\n Raises:\n TranspilerError: if the format is not valid.\n \"\"\"\n\n index = list(range(len(dags)))\n final_dags = parallel_map(_transpile_dags_parallel, index,\n task_args=(dags, initial_layouts),\n task_kwargs={'basis_gates': basis_gates,\n 'coupling_map': coupling_map,\n 'seed': seed,\n 'pass_manager': pass_manager})\n return final_dags\n\n\ndef _transpile_dags_parallel(idx, dags, initial_layouts, basis_gates='u1,u2,u3,cx,id',\n coupling_map=None, seed=None, pass_manager=None):\n \"\"\"Helper function for transpiling in parallel (if available).\n\n Args:\n idx (int): Index for dag of interest\n dags (list): List of dags\n initial_layouts (list): List of initial layouts\n basis_gates (str): a comma seperated string for the target basis gates\n coupling_map (list): A graph of coupling\n seed (int): random seed for the swap 
mapper\n pass_manager (PassManager): pass manager instance for the tranpilation process\n If None, a default set of passes are run.\n Otherwise, the passes defined in it will run.\n If contains no passes in it, no dag transformations occur.\n Returns:\n DAGCircuit: DAG circuit after going through transpilation.\n \"\"\"\n dag = dags[idx]\n initial_layout = initial_layouts[idx]\n final_dag, final_layout = transpile(\n dag,\n basis_gates=basis_gates,\n coupling_map=coupling_map,\n initial_layout=initial_layout,\n get_layout=True,\n seed=seed,\n pass_manager=pass_manager)\n final_dag.layout = [[k, v]\n for k, v in final_layout.items()] if final_layout else None\n return final_dag\n\n\ndef _dags_2_qobj(dags, backend_name, config=None, shots=None,\n max_credits=None, qobj_id=None, basis_gates=None, coupling_map=None,\n seed=None):\n \"\"\"Convert a list of dags into a qobj.\n\n Args:\n dags (list[DAGCircuit]): dags to compile\n backend_name (str): name of runner backend\n config (dict): dictionary of parameters (e.g. noise) used by runner\n shots (int): number of repetitions of each circuit, for sampling\n max_credits (int): maximum credits to use\n qobj_id (int): identifier for the generated qobj\n basis_gates (list[str])): basis gates for the experiment\n coupling_map (list): coupling map (perhaps custom) to target in mapping\n seed (int): random seed for simulators\n\n Returns:\n Qobj: the Qobj to be run on the backends\n \"\"\"\n # TODO: the following will be removed from qobj and thus removed here:\n # `basis_gates`, `coupling_map`\n\n # Step 1: create the Qobj, with empty experiments.\n # Copy the configuration: the values in `config` have preference\n qobj_config = deepcopy(config or {})\n # TODO: \"memory_slots\" is required by the qobj schema in the top-level\n # qobj.config, and is user-defined. At the moment is set to the maximum\n # number of *register* slots for the circuits, in order to have `measure`\n # behave properly until the transition is over; and each circuit stores\n # its memory_slots in its configuration.\n qobj_config.update({'shots': shots,\n 'max_credits': max_credits,\n 'memory_slots': 0})\n\n qobj = Qobj(qobj_id=qobj_id or str(uuid.uuid4()),\n config=QobjConfig(**qobj_config),\n experiments=[],\n header=QobjHeader(backend_name=backend_name))\n if seed:\n qobj.config.seed = seed\n\n qobj.experiments = parallel_map(_dags_2_qobj_parallel, dags,\n task_kwargs={'basis_gates': basis_gates,\n 'config': config,\n 'coupling_map': coupling_map})\n\n # Update the `memory_slots` value.\n # TODO: remove when `memory_slots` can be provided by the user.\n qobj.config.memory_slots = max(experiment.config.memory_slots for\n experiment in qobj.experiments)\n\n # Update the `n_qubits` global value.\n # TODO: num_qubits is not part of the qobj specification, but needed\n # for the simulator.\n qobj.config.n_qubits = max(experiment.config.n_qubits for\n experiment in qobj.experiments)\n\n return qobj\n\n\ndef _dags_2_qobj_parallel(dag, config=None, basis_gates=None,\n coupling_map=None):\n \"\"\"Helper function for dags to qobj in parallel (if available).\n\n Args:\n dag (DAGCircuit): DAG to compile\n config (dict): dictionary of parameters (e.g. 
noise) used by runner\n basis_gates (list[str])): basis gates for the experiment\n coupling_map (list): coupling map (perhaps custom) to target in mapping\n\n Returns:\n Qobj: Qobj to be run on the backends\n \"\"\"\n json_circuit = DagUnroller(dag, JsonBackend(dag.basis)).execute()\n # Step 3a: create the Experiment based on json_circuit\n experiment = QobjExperiment.from_dict(json_circuit)\n # Step 3b: populate the Experiment configuration and header\n experiment.header.name = dag.name\n # TODO: place in header or config?\n experiment_config = deepcopy(config or {})\n experiment_config.update({\n 'coupling_map': coupling_map,\n 'basis_gates': basis_gates,\n 'layout': dag.layout,\n 'memory_slots': sum(dag.cregs.values()),\n # TODO: `n_qubits` is not part of the qobj spec, but needed for the simulator.\n 'n_qubits': sum(dag.qregs.values())})\n experiment.config = QobjItem(**experiment_config)\n\n # set eval_symbols=True to evaluate each symbolic expression\n # TODO: after transition to qobj, we can drop this\n experiment.header.compiled_circuit_qasm = dag.qasm(\n qeflag=True, eval_symbols=True)\n # Step 3c: add the Experiment to the Qobj\n return experiment\n\n\ndef transpile(dag, basis_gates='u1,u2,u3,cx,id', coupling_map=None,\n initial_layout=None, get_layout=False,\n format='dag', seed=None, pass_manager=None):\n \"\"\"Transform a dag circuit into another dag circuit (transpile), through\n consecutive passes on the dag.\n\n Args:\n dag (DAGCircuit): dag circuit to transform via transpilation\n basis_gates (str): a comma seperated string for the target basis gates\n coupling_map (list): A graph of coupling::\n\n [\n [control0(int), target0(int)],\n [control1(int), target1(int)],\n ]\n\n eg. [[0, 2], [1, 2], [1, 3], [3, 4]}\n\n initial_layout (dict): A mapping of qubit to qubit::\n\n {\n (\"q\", start(int)): (\"q\", final(int)),\n ...\n }\n eg.\n {\n (\"q\", 0): (\"q\", 0),\n (\"q\", 1): (\"q\", 1),\n (\"q\", 2): (\"q\", 2),\n (\"q\", 3): (\"q\", 3)\n }\n get_layout (bool): flag for returning the final layout after mapping\n format (str): The target format of the compilation:\n {'dag', 'json', 'qasm'}\n seed (int): random seed for the swap mapper\n pass_manager (PassManager): pass manager instance for the tranpilation process\n If None, a default set of passes are run.\n Otherwise, the passes defined in it will run.\n If contains no passes in it, no dag transformations occur.\n\n Returns:\n DAGCircuit: transformed dag\n DAGCircuit, dict: transformed dag along with the final layout on backend qubits\n\n Raises:\n TranspilerError: if the format is not valid.\n \"\"\"\n # TODO: `basis_gates` will be removed after we have the unroller pass.\n # TODO: `coupling_map`, `initial_layout`, `get_layout`, `seed` removed after mapper pass.\n\n # TODO: move this to the mapper pass\n num_qubits = sum(dag.qregs.values())\n if num_qubits == 1 or coupling_map == \"all-to-all\":\n coupling_map = None\n\n final_layout = None\n\n if pass_manager:\n # run the passes specified by the pass manager\n for pass_ in pass_manager.passes():\n pass_.run(dag)\n else:\n # default set of passes\n # TODO: move each step here to a pass, and use a default passmanager below\n basis = basis_gates.split(',') if basis_gates else []\n dag_unroller = DagUnroller(dag, DAGBackend(basis))\n dag = dag_unroller.expand_gates()\n # if a coupling map is given compile to the map\n if coupling_map:\n logger.info(\"pre-mapping properties: %s\",\n dag.property_summary())\n # Insert swap gates\n coupling = 
Coupling(coupling_list2dict(coupling_map))\n removed_meas = remove_last_measurements(dag)\n logger.info(\"measurements moved: %s\", removed_meas)\n logger.info(\"initial layout: %s\", initial_layout)\n dag, final_layout, last_layout = swap_mapper(\n dag, coupling, initial_layout, trials=20, seed=seed)\n logger.info(\"final layout: %s\", final_layout)\n # Expand swaps\n dag_unroller = DagUnroller(dag, DAGBackend(basis))\n dag = dag_unroller.expand_gates()\n # Change cx directions\n dag = direction_mapper(dag, coupling)\n # Simplify cx gates\n cx_cancellation(dag)\n # Simplify single qubit gates\n dag = optimize_1q_gates(dag)\n return_last_measurements(dag, removed_meas,\n last_layout)\n logger.info(\"post-mapping properties: %s\",\n dag.property_summary())\n\n # choose output format\n # TODO: do we need all of these formats, or just the dag?\n if format == 'dag':\n compiled_circuit = dag\n elif format == 'json':\n # FIXME: JsonBackend is wrongly taking an ordered dict as basis, not list\n dag_unroller = DagUnroller(dag, JsonBackend(dag.basis))\n compiled_circuit = dag_unroller.execute()\n elif format == 'qasm':\n compiled_circuit = dag.qasm()\n else:\n raise TranspilerError('unrecognized circuit format')\n\n if get_layout:\n return compiled_circuit, final_layout\n return compiled_circuit\n\n\ndef _best_subset(backend, n_qubits):\n \"\"\"Computes the qubit mapping with the best\n connectivity.\n\n Parameters:\n backend (Qiskit.BaseBackend): A QISKit backend instance.\n n_qubits (int): Number of subset qubits to consider.\n\n Returns:\n ndarray: Array of qubits to use for best\n connectivity mapping.\n\n Raises:\n QISKitError: Wrong number of qubits given.\n \"\"\"\n if n_qubits == 1:\n return np.array([0])\n elif n_qubits <= 0:\n raise QISKitError('Number of qubits <= 0.')\n\n device_qubits = backend.configuration()['n_qubits']\n if n_qubits > device_qubits:\n raise QISKitError('Number of qubits greater than device.')\n\n cmap = np.asarray(backend.configuration()['coupling_map'])\n data = np.ones_like(cmap[:, 0])\n sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])),\n shape=(device_qubits, device_qubits)).tocsr()\n best = 0\n best_map = None\n # do bfs with each node as starting point\n for k in range(sp_cmap.shape[0]):\n bfs = cs.breadth_first_order(sp_cmap, i_start=k, directed=False,\n return_predecessors=False)\n\n connection_count = 0\n for i in range(n_qubits):\n node_idx = bfs[i]\n for j in range(sp_cmap.indptr[node_idx],\n sp_cmap.indptr[node_idx + 1]):\n node = sp_cmap.indices[j]\n for counter in range(n_qubits):\n if node == bfs[counter]:\n connection_count += 1\n break\n\n if connection_count > best:\n best = connection_count\n best_map = bfs[0:n_qubits]\n return best_map\n\n\ndef _matches_coupling_map(dag, coupling_map):\n \"\"\"Iterate over circuit gates to check if all multi-qubit couplings\n match the qubit coupling graph in the backend.\n\n Parameters:\n dag (DAGCircuit): DAG representation of circuit.\n coupling_map (list): Backend coupling map, represented as an adjacency list.\n\n Returns:\n bool: True if all gates readily fit the backend coupling graph.\n False if there's at least one gate that uses multiple qubits\n which does not match the backend couplings.\n \"\"\"\n match = True\n for _, data in dag.multi_graph.nodes(data=True):\n if data['type'] == 'op':\n gate_map = [qr[1] for qr in data['qargs']]\n if len(gate_map) > 1:\n if gate_map not in coupling_map:\n match = False\n break\n return match\n\n\ndef _pick_best_layout(dag, backend):\n \"\"\"Pick a convenient 
layout depending on the best matching qubit connectivity\n\n Parameters:\n dag (DAGCircuit): DAG representation of circuit.\n backend (BaseBackend) : The backend with the coupling_map for searching\n\n Returns:\n dict: A special ordered initial_layout\n\n \"\"\"\n num_qubits = sum(dag.qregs.values())\n best_sub = _best_subset(backend, num_qubits)\n layout = {}\n map_iter = 0\n for key, value in dag.qregs.items():\n for i in range(value):\n layout[(key, i)] = ('q', best_sub[map_iter])\n map_iter += 1\n return layout\n", "path": "qiskit/transpiler/_transpiler.py" } ]
diff --git a/qiskit/transpiler/_transpiler.py b/qiskit/transpiler/_transpiler.py index 4479b35b6706..7f6c68392af2 100644 --- a/qiskit/transpiler/_transpiler.py +++ b/qiskit/transpiler/_transpiler.py @@ -457,9 +457,10 @@ def _matches_coupling_map(dag, coupling_map): for _, data in dag.multi_graph.nodes(data=True): if data['type'] == 'op': gate_map = [qr[1] for qr in data['qargs']] - if gate_map not in coupling_map: - match = False - break + if len(gate_map) > 1: + if gate_map not in coupling_map: + match = False + break return match
_matches_coupling_map seems to check single-qubit ops too and fails

### Information

- **Qiskit Terra version**: 0.6.0
- **Python version**: 3.6
- **Operating system**: macOS

### What is the current behavior?

Using `_matches_coupling_map` breaks and returns `False` at the first single-qubit op, since single qubits are not in the coupling map.

### Steps to reproduce the problem

Run the function on a DAG.

### What is the expected behavior?

Ignore single-qubit ops.

### Suggested solutions

Check the number of qubits. I have a fix and a pull request ready to go if that's OK. 👍
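The suggested solution boils down to only testing the coupling map for multi-qubit gates. A standalone sketch of the predicate (a hypothetical simplification of `_matches_coupling_map` that takes plain qubit-index lists instead of a DAG):

```python
def matches_coupling_map(gate_qubit_lists, coupling_map):
    """Return True if every multi-qubit gate fits the coupling graph.

    Single-qubit ops are skipped: they never appear in a coupling map,
    so checking them would always (wrongly) report a mismatch.
    """
    for qubits in gate_qubit_lists:
        if len(qubits) > 1 and list(qubits) not in coupling_map:
            return False
    return True

# An H on qubit 0 plus a CX on (0, 1) against a two-qubit device:
coupling = [[0, 1], [1, 0]]
print(matches_coupling_map([[0], [0, 1]], coupling))  # True
print(matches_coupling_map([[1, 2]], coupling))       # False
```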
ARM-DOE__ACT-396
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Atmospheric data Community Toolkit documentation build configuration file, created by\n# sphinx-quickstart on Thu Jun 28 12:35:56 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting',\n 'matplotlib.sphinxext.plot_directive',\n 'sphinx_copybutton',\n 'sphinx_gallery.gen_gallery',\n 'sphinx.ext.napoleon',\n]\n\nexclude_patterns = ['_build', '**.ipynb_checkpoints']\nsphinx_gallery_conf = {\n 'examples_dirs': '../../examples',\n 'gallery_dirs': 'source/auto_examples'\n}\n\n# Configuration options for plot_directive. See:\n# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81\nplot_html_show_source_link = False\nplot_html_show_formats = False\n\n# Generate the API documentation when building\nautoclass_content = \"both\"\nautosummary_generate = True\nautosummary_imported_members = True\n\n# Otherwise, the Return parameter list looks different from the Parameter list\nnapoleon_use_rtype = False\nnapoleon_use_ivar = True\nnapoleon_include_init_with_doc = False\nnapoleon_use_param = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Atmospheric data Community Toolkit'\ncopyright = '2018, ACT Developers'\nauthor = 'ACT Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nimport act\n# The short X.Y version.\nversion = act.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = act.__version__\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'act'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'act.tex', 'Atmospheric data Community Toolkit Documentation',\n 'Contributors', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',\n author, 'act', 'Package for connecting users to the data',\n 'Miscellaneous'),\n]\n\n\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),\n 'matplotlib': ('https://matplotlib.org', None),\n}\n", "path": "docs/source/conf.py" } ]
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Atmospheric data Community Toolkit documentation build configuration file, created by\n# sphinx-quickstart on Thu Jun 28 12:35:56 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting',\n 'matplotlib.sphinxext.plot_directive',\n 'sphinx_copybutton',\n 'sphinx_gallery.gen_gallery',\n 'sphinx.ext.napoleon',\n]\n\nexclude_patterns = ['_build', '**.ipynb_checkpoints']\nsphinx_gallery_conf = {\n 'examples_dirs': '../../examples',\n 'gallery_dirs': 'source/auto_examples'\n}\n\n# Configuration options for plot_directive. See:\n# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81\nplot_html_show_source_link = False\nplot_html_show_formats = False\n\n# Generate the API documentation when building\nautoclass_content = \"both\"\nautosummary_generate = True\nautosummary_imported_members = True\n\n# Otherwise, the Return parameter list looks different from the Parameter list\nnapoleon_use_rtype = False\nnapoleon_use_ivar = True\nnapoleon_include_init_with_doc = False\nnapoleon_use_param = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Atmospheric data Community Toolkit'\ncopyright = '2018, ACT Developers'\nauthor = 'ACT Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nimport act\n# The short X.Y version.\nversion = act.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = act.__version__\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'google_analytics_id': 'UA-179020619-3',\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'act'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'act.tex', 'Atmospheric data Community Toolkit Documentation',\n 'Contributors', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',\n author, 'act', 'Package for connecting users to the data',\n 'Miscellaneous'),\n]\n\n\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),\n 'matplotlib': ('https://matplotlib.org', None),\n}\n", "path": "docs/source/conf.py" } ]
diff --git a/docs/source/conf.py b/docs/source/conf.py
index b3f210966e..252d718671 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -126,7 +126,9 @@
 # further. For a list of options available for each theme, see the
 # documentation.
 #
-# html_theme_options = {}
+html_theme_options = {
+    'google_analytics_id': 'UA-179020619-3',
+}
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
Add Google Analytics ID

Add a Google Analytics ID to the `conf.py` file used by Sphinx. For those interested in having access to the analytics, you will need to send over your Gmail address.

Fixes #396
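A minimal sketch of the resulting configuration. The `html_theme_options` key name and the `UA-...` property id are taken verbatim from the diff above; the surrounding lines are the boilerplate already present in this project's `conf.py`, and whether the theme actually honors this particular option name is assumed from the patch, not verified here:

```python
# docs/source/conf.py (excerpt)
import sphinx_rtd_theme

html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# sphinx_rtd_theme consumes this dict; the analytics id is expected to
# end up in the rendered page templates.
html_theme_options = {
    # Google Analytics property id added by the patch above
    'google_analytics_id': 'UA-179020619-3',
}
```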
evennia__evennia-2748
[ { "content": "\"\"\"\nThis implements the common managers that are used by the\nabstract models in dbobjects.py (and which are thus shared by\nall Attributes and TypedObjects).\n\n\"\"\"\nimport shlex\nfrom django.db.models import F, Q, Count, ExpressionWrapper, FloatField\nfrom django.db.models.functions import Cast\nfrom evennia.utils import idmapper\nfrom evennia.utils.utils import make_iter, variable_from_module\nfrom evennia.typeclasses.attributes import Attribute\nfrom evennia.typeclasses.tags import Tag\n\n__all__ = (\"TypedObjectManager\",)\n_GA = object.__getattribute__\n_Tag = None\n\n\n# Managers\n\n\nclass TypedObjectManager(idmapper.manager.SharedMemoryManager):\n \"\"\"\n Common ObjectManager for all dbobjects.\n\n \"\"\"\n\n # common methods for all typed managers. These are used\n # in other methods. Returns querysets.\n\n # Attribute manager methods\n def get_attribute(\n self, key=None, category=None, value=None, strvalue=None, obj=None, attrtype=None, **kwargs\n ):\n \"\"\"\n Return Attribute objects by key, by category, by value, by strvalue, by\n object (it is stored on) or with a combination of those criteria.\n\n Args:\n key (str, optional): The attribute's key to search for\n category (str, optional): The category of the attribute(s) to search for.\n value (str, optional): The attribute value to search for.\n Note that this is not a very efficient operation since it\n will query for a pickled entity. Mutually exclusive to\n `strvalue`.\n strvalue (str, optional): The str-value to search for.\n Most Attributes will not have strvalue set. This is\n mutually exclusive to the `value` keyword and will take\n precedence if given.\n obj (Object, optional): On which object the Attribute to\n search for is.\n attrype (str, optional): An attribute-type to search for.\n By default this is either `None` (normal Attributes) or\n `\"nick\"`.\n **kwargs (any): Currently unused. Reserved for future use.\n\n Returns:\n list: The matching Attributes.\n\n \"\"\"\n dbmodel = self.model.__dbclass__.__name__.lower()\n query = [(\"attribute__db_attrtype\", attrtype), (\"attribute__db_model\", dbmodel)]\n if obj:\n query.append((\"%s__id\" % self.model.__dbclass__.__name__.lower(), obj.id))\n if key:\n query.append((\"attribute__db_key\", key))\n if category:\n query.append((\"attribute__db_category\", category))\n if strvalue:\n query.append((\"attribute__db_strvalue\", strvalue))\n if value:\n # no reason to make strvalue/value mutually exclusive at this level\n query.append((\"attribute__db_value\", value))\n return Attribute.objects.filter(\n pk__in=self.model.db_attributes.through.objects.filter(**dict(query)).values_list(\n \"attribute_id\", flat=True\n )\n )\n\n def get_nick(self, key=None, category=None, value=None, strvalue=None, obj=None):\n \"\"\"\n Get a nick, in parallel to `get_attribute`.\n\n Args:\n key (str, optional): The nicks's key to search for\n category (str, optional): The category of the nicks(s) to search for.\n value (str, optional): The attribute value to search for. Note that this\n is not a very efficient operation since it will query for a pickled\n entity. Mutually exclusive to `strvalue`.\n strvalue (str, optional): The str-value to search for. Most Attributes\n will not have strvalue set. 
This is mutually exclusive to the `value`\n keyword and will take precedence if given.\n obj (Object, optional): On which object the Attribute to search for is.\n\n Returns:\n nicks (list): The matching Nicks.\n\n \"\"\"\n return self.get_attribute(\n key=key, category=category, value=value, strvalue=strvalue, obj=obj\n )\n\n def get_by_attribute(\n self, key=None, category=None, value=None, strvalue=None, attrtype=None, **kwargs\n ):\n \"\"\"\n Return objects having attributes with the given key, category,\n value, strvalue or combination of those criteria.\n\n Args:\n key (str, optional): The attribute's key to search for\n category (str, optional): The category of the attribute\n to search for.\n value (str, optional): The attribute value to search for.\n Note that this is not a very efficient operation since it\n will query for a pickled entity. Mutually exclusive to\n `strvalue`.\n strvalue (str, optional): The str-value to search for.\n Most Attributes will not have strvalue set. This is\n mutually exclusive to the `value` keyword and will take\n precedence if given.\n attrype (str, optional): An attribute-type to search for.\n By default this is either `None` (normal Attributes) or\n `\"nick\"`.\n kwargs (any): Currently unused. Reserved for future use.\n\n Returns:\n obj (list): Objects having the matching Attributes.\n\n \"\"\"\n dbmodel = self.model.__dbclass__.__name__.lower()\n query = [\n (\"db_attributes__db_attrtype\", attrtype),\n (\"db_attributes__db_model\", dbmodel),\n ]\n if key:\n query.append((\"db_attributes__db_key\", key))\n if category:\n query.append((\"db_attributes__db_category\", category))\n if strvalue:\n query.append((\"db_attributes__db_strvalue\", strvalue))\n elif value:\n # strvalue and value are mutually exclusive\n query.append((\"db_attributes__db_value\", value))\n return self.filter(**dict(query))\n\n def get_by_nick(self, key=None, nick=None, category=\"inputline\"):\n \"\"\"\n Get object based on its key or nick.\n\n Args:\n key (str, optional): The attribute's key to search for\n nick (str, optional): The nickname to search for\n category (str, optional): The category of the nick\n to search for.\n\n Returns:\n obj (list): Objects having the matching Nicks.\n\n \"\"\"\n return self.get_by_attribute(key=key, category=category, strvalue=nick, attrtype=\"nick\")\n\n # Tag manager methods\n\n def get_tag(self, key=None, category=None, obj=None, tagtype=None, global_search=False):\n \"\"\"\n Return Tag objects by key, by category, by object (it is\n stored on) or with a combination of those criteria.\n\n Args:\n key (str, optional): The Tag's key to search for\n category (str, optional): The Tag of the attribute(s)\n to search for.\n obj (Object, optional): On which object the Tag to\n search for is.\n tagtype (str, optional): One of `None` (normal tags),\n \"alias\" or \"permission\"\n global_search (bool, optional): Include all possible tags,\n not just tags on this object\n\n Returns:\n tag (list): The matching Tags.\n\n \"\"\"\n global _Tag\n if not _Tag:\n from evennia.typeclasses.models import Tag as _Tag\n dbmodel = self.model.__dbclass__.__name__.lower()\n if global_search:\n # search all tags using the Tag model\n query = [(\"db_tagtype\", tagtype), (\"db_model\", dbmodel)]\n if obj:\n query.append((\"id\", obj.id))\n if key:\n query.append((\"db_key\", key))\n if category:\n query.append((\"db_category\", category))\n return _Tag.objects.filter(**dict(query))\n else:\n # search only among tags stored on on this model\n query = 
[(\"tag__db_tagtype\", tagtype), (\"tag__db_model\", dbmodel)]\n if obj:\n query.append((\"%s__id\" % self.model.__name__.lower(), obj.id))\n if key:\n query.append((\"tag__db_key\", key))\n if category:\n query.append((\"tag__db_category\", category))\n return Tag.objects.filter(\n pk__in=self.model.db_tags.through.objects.filter(**dict(query)).values_list(\n \"tag_id\", flat=True\n )\n )\n\n def get_permission(self, key=None, category=None, obj=None):\n \"\"\"\n Get a permission from the database.\n\n Args:\n key (str, optional): The permission's identifier.\n category (str, optional): The permission's category.\n obj (object, optional): The object on which this Tag is set.\n\n Returns:\n permission (list): Permission objects.\n\n \"\"\"\n return self.get_tag(key=key, category=category, obj=obj, tagtype=\"permission\")\n\n def get_alias(self, key=None, category=None, obj=None):\n \"\"\"\n Get an alias from the database.\n\n Args:\n key (str, optional): The permission's identifier.\n category (str, optional): The permission's category.\n obj (object, optional): The object on which this Tag is set.\n\n Returns:\n alias (list): Alias objects.\n\n \"\"\"\n return self.get_tag(key=key, category=category, obj=obj, tagtype=\"alias\")\n\n def get_by_tag(self, key=None, category=None, tagtype=None, **kwargs):\n \"\"\"\n Return objects having tags with a given key or category or combination of the two.\n Also accepts multiple tags/category/tagtype\n\n Args:\n key (str or list, optional): Tag key or list of keys. Not case sensitive.\n category (str or list, optional): Tag category. Not case sensitive.\n If `key` is a list, a single category can either apply to all\n keys in that list or this must be a list matching the `key`\n list element by element. If no `key` is given, all objects with\n tags of this category are returned.\n tagtype (str, optional): 'type' of Tag, by default\n this is either `None` (a normal Tag), `alias` or\n `permission`. This always apply to all queried tags.\n\n Keyword Args:\n match (str): \"all\" (default) or \"any\"; determines whether the\n target object must be tagged with ALL of the provided\n tags/categories or ANY single one. 
ANY will perform a weighted\n sort, so objects with more tag matches will outrank those with\n fewer tag matches.\n\n Returns:\n objects (list): Objects with matching tag.\n\n Raises:\n IndexError: If `key` and `category` are both lists and `category` is shorter\n than `key`.\n\n \"\"\"\n if not (key or category):\n return []\n\n global _Tag\n if not _Tag:\n from evennia.typeclasses.models import Tag as _Tag\n\n anymatch = \"any\" == kwargs.get(\"match\", \"all\").lower().strip()\n\n keys = make_iter(key) if key else []\n categories = make_iter(category) if category else []\n n_keys = len(keys)\n n_categories = len(categories)\n unique_categories = sorted(set(categories))\n n_unique_categories = len(unique_categories)\n\n dbmodel = self.model.__dbclass__.__name__.lower()\n query = (\n self.filter(db_tags__db_tagtype__iexact=tagtype, db_tags__db_model__iexact=dbmodel)\n .distinct()\n .order_by(\"id\")\n )\n\n if n_keys > 0:\n # keys and/or categories given\n if n_categories == 0:\n categories = [None for _ in range(n_keys)]\n elif n_categories == 1 and n_keys > 1:\n cat = categories[0]\n categories = [cat for _ in range(n_keys)]\n elif 1 < n_categories < n_keys:\n raise IndexError(\n \"get_by_tag needs a single category or a list of categories \"\n \"the same length as the list of tags.\"\n )\n clauses = Q()\n for ikey, key in enumerate(keys):\n # ANY mode; must match any one of the given tags/categories\n clauses |= Q(db_key__iexact=key, db_category__iexact=categories[ikey])\n else:\n # only one or more categories given\n clauses = Q()\n # ANY mode; must match any one of them\n for category in unique_categories:\n clauses |= Q(db_category__iexact=category)\n\n tags = _Tag.objects.filter(clauses)\n query = query.filter(db_tags__in=tags).annotate(\n matches=Count(\"db_tags__pk\", filter=Q(db_tags__in=tags), distinct=True)\n )\n\n if anymatch:\n # ANY: Match any single tag, ordered by weight\n query = query.order_by(\"-matches\")\n else:\n # Default ALL: Match all of the tags and optionally more\n n_req_tags = n_keys if n_keys > 0 else n_unique_categories\n query = query.filter(matches__gte=n_req_tags)\n\n return query\n\n def get_by_permission(self, key=None, category=None):\n \"\"\"\n Return objects having permissions with a given key or category or\n combination of the two.\n\n Args:\n key (str, optional): Permissions key. Not case sensitive.\n category (str, optional): Permission category. Not case sensitive.\n Returns:\n objects (list): Objects with matching permission.\n \"\"\"\n return self.get_by_tag(key=key, category=category, tagtype=\"permission\")\n\n def get_by_alias(self, key=None, category=None):\n \"\"\"\n Return objects having aliases with a given key or category or\n combination of the two.\n\n Args:\n key (str, optional): Alias key. Not case sensitive.\n category (str, optional): Alias category. Not case sensitive.\n Returns:\n objects (list): Objects with matching alias.\n \"\"\"\n return self.get_by_tag(key=key, category=category, tagtype=\"alias\")\n\n def create_tag(self, key=None, category=None, data=None, tagtype=None):\n \"\"\"\n Create a new Tag of the base type associated with this\n object. This makes sure to create case-insensitive tags.\n If the exact same tag configuration (key+category+tagtype+dbmodel)\n exists on the model, a new tag will not be created, but an old\n one returned.\n\n\n Args:\n key (str, optional): Tag key. Not case sensitive.\n category (str, optional): Tag category. 
Not case sensitive.\n data (str, optional): Extra information about the tag.\n tagtype (str or None, optional): 'type' of Tag, by default\n this is either `None` (a normal Tag), `alias` or\n `permission`.\n Notes:\n The `data` field is not part of the uniqueness of the tag:\n Setting `data` on an existing tag will overwrite the old\n data field. It is intended only as a way to carry\n information about the tag (like a help text), not to carry\n any information about the tagged objects themselves.\n\n \"\"\"\n data = str(data) if data is not None else None\n # try to get old tag\n\n dbmodel = self.model.__dbclass__.__name__.lower()\n tag = self.get_tag(key=key, category=category, tagtype=tagtype, global_search=True)\n if tag and data is not None:\n # get tag from list returned by get_tag\n tag = tag[0]\n # overload data on tag\n tag.db_data = data\n tag.save()\n elif not tag:\n # create a new tag\n global _Tag\n if not _Tag:\n from evennia.typeclasses.models import Tag as _Tag\n tag = _Tag.objects.create(\n db_key=key.strip().lower() if key is not None else None,\n db_category=category.strip().lower() if category and key is not None else None,\n db_data=data,\n db_model=dbmodel,\n db_tagtype=tagtype.strip().lower() if tagtype is not None else None,\n )\n tag.save()\n return make_iter(tag)[0]\n\n def dbref(self, dbref, reqhash=True):\n \"\"\"\n Determing if input is a valid dbref.\n\n Args:\n dbref (str or int): A possible dbref.\n reqhash (bool, optional): If the \"#\" is required for this\n to be considered a valid hash.\n\n Returns:\n dbref (int or None): The integer part of the dbref.\n\n Notes:\n Valid forms of dbref (database reference number) are\n either a string '#N' or an integer N.\n\n \"\"\"\n if reqhash and not (isinstance(dbref, str) and dbref.startswith(\"#\")):\n return None\n if isinstance(dbref, str):\n dbref = dbref.lstrip(\"#\")\n try:\n if int(dbref) < 0:\n return None\n except Exception:\n return None\n return dbref\n\n def get_id(self, dbref):\n \"\"\"\n Find object with given dbref.\n\n Args:\n dbref (str or int): The id to search for.\n\n Returns:\n object (TypedObject): The matched object.\n\n \"\"\"\n dbref = self.dbref(dbref, reqhash=False)\n try:\n return self.get(id=dbref)\n except self.model.DoesNotExist:\n pass\n return None\n\n def dbref_search(self, dbref):\n \"\"\"\n Alias to get_id.\n\n Args:\n dbref (str or int): The id to search for.\n\n Returns:\n Queryset: Queryset with 0 or 1 match.\n\n \"\"\"\n dbref = self.dbref(dbref, reqhash=False)\n if dbref:\n return self.filter(id=dbref)\n return self.none()\n\n def get_dbref_range(self, min_dbref=None, max_dbref=None):\n \"\"\"\n Get objects within a certain range of dbrefs.\n\n Args:\n min_dbref (int): Start of dbref range.\n max_dbref (int): End of dbref range (inclusive)\n\n Returns:\n objects (list): TypedObjects with dbrefs within\n the given dbref ranges.\n\n \"\"\"\n retval = super().all()\n if min_dbref is not None:\n retval = retval.filter(id__gte=self.dbref(min_dbref, reqhash=False))\n if max_dbref is not None:\n retval = retval.filter(id__lte=self.dbref(max_dbref, reqhash=False))\n return retval\n\n def get_typeclass_totals(self, *args, **kwargs) -> object:\n \"\"\"\n Returns a queryset of typeclass composition statistics.\n\n Returns:\n qs (Queryset): A queryset of dicts containing the typeclass (name),\n the count of objects with that typeclass and a float representing\n the percentage of objects associated with the typeclass.\n\n \"\"\"\n return (\n self.values(\"db_typeclass_path\")\n 
.distinct()\n .annotate(\n # Get count of how many objects for each typeclass exist\n count=Count(\"db_typeclass_path\")\n )\n .annotate(\n # Rename db_typeclass_path field to something more human\n typeclass=F(\"db_typeclass_path\"),\n # Calculate this class' percentage of total composition\n percent=ExpressionWrapper(\n ((F(\"count\") / float(self.count())) * 100.0),\n output_field=FloatField(),\n ),\n )\n .values(\"typeclass\", \"count\", \"percent\")\n )\n\n def object_totals(self):\n \"\"\"\n Get info about database statistics.\n\n Returns:\n census (dict): A dictionary `{typeclass_path: number, ...}` with\n all the typeclasses active in-game as well as the number\n of such objects defined (i.e. the number of database\n object having that typeclass set on themselves).\n\n \"\"\"\n stats = self.get_typeclass_totals().order_by(\"typeclass\")\n return {x.get(\"typeclass\"): x.get(\"count\") for x in stats}\n\n def typeclass_search(self, typeclass, include_children=False, include_parents=False):\n \"\"\"\n Searches through all objects returning those which has a\n certain typeclass. If location is set, limit search to objects\n in that location.\n\n Args:\n typeclass (str or class): A typeclass class or a python path to a typeclass.\n include_children (bool, optional): Return objects with\n given typeclass *and* all children inheriting from this\n typeclass. Mutuall exclusive to `include_parents`.\n include_parents (bool, optional): Return objects with\n given typeclass *and* all parents to this typeclass.\n Mutually exclusive to `include_children`.\n\n Returns:\n objects (list): The objects found with the given typeclasses.\n\n \"\"\"\n\n if callable(typeclass):\n cls = typeclass.__class__\n typeclass = \"%s.%s\" % (cls.__module__, cls.__name__)\n elif not isinstance(typeclass, str) and hasattr(typeclass, \"path\"):\n typeclass = typeclass.path\n\n # query objects of exact typeclass\n query = Q(db_typeclass_path__exact=typeclass)\n\n if include_children:\n # build requests for child typeclass objects\n clsmodule, clsname = typeclass.rsplit(\".\", 1)\n cls = variable_from_module(clsmodule, clsname)\n subclasses = cls.__subclasses__()\n if subclasses:\n for child in (child for child in subclasses if hasattr(child, \"path\")):\n query = query | Q(db_typeclass_path__exact=child.path)\n elif include_parents:\n # build requests for parent typeclass objects\n clsmodule, clsname = typeclass.rsplit(\".\", 1)\n cls = variable_from_module(clsmodule, clsname)\n parents = cls.__mro__\n if parents:\n for parent in (parent for parent in parents if hasattr(parent, \"path\")):\n query = query | Q(db_typeclass_path__exact=parent.path)\n # actually query the database\n return super().filter(query)\n\n\nclass TypeclassManager(TypedObjectManager):\n \"\"\"\n Manager for the typeclasses. The main purpose of this manager is\n to limit database queries to the given typeclass despite all\n typeclasses technically being defined in the same core database\n model.\n\n \"\"\"\n\n # object-manager methods\n def smart_search(self, query):\n \"\"\"\n Search by supplying a string with optional extra search criteria to aid the query.\n\n Args:\n query (str): A search criteria that accepts extra search criteria on the following\n forms:\n\n [key|alias|#dbref...]\n [tag==<tagstr>[:category]...]\n [attr==<key>:<value>:category...]\n\n All three can be combined in the same query, separated by spaces.\n\n Returns:\n matches (queryset): A queryset result matching all queries exactly. 
If wanting to use\n spaces or ==, != in tags or attributes, enclose them in quotes.\n\n Example:\n house = smart_search(\"key=foo alias=bar tag=house:building tag=magic attr=color:red\")\n\n Note:\n The flexibility of this method is limited by the input line format. Tag/attribute\n matching only works for matching primitives. For even more complex queries, such as\n 'in' operations or object field matching, use the full django query language.\n\n \"\"\"\n # shlex splits by spaces unless escaped by quotes\n querysplit = shlex.split(query)\n queries, plustags, plusattrs, negtags, negattrs = [], [], [], [], []\n for ipart, part in enumerate(querysplit):\n key, rest = part, \"\"\n if \":\" in part:\n key, rest = part.split(\":\", 1)\n # tags are on the form tag or tag:category\n if key.startswith(\"tag==\"):\n plustags.append((key[5:], rest))\n continue\n elif key.startswith(\"tag!=\"):\n negtags.append((key[5:], rest))\n continue\n # attrs are on the form attr:value or attr:value:category\n elif rest:\n value, category = rest, \"\"\n if \":\" in rest:\n value, category = rest.split(\":\", 1)\n if key.startswith(\"attr==\"):\n plusattrs.append((key[7:], value, category))\n continue\n elif key.startswith(\"attr!=\"):\n negattrs.append((key[7:], value, category))\n continue\n # if we get here, we are entering a key search criterion which\n # we assume is one word.\n queries.append(part)\n # build query from components\n query = \" \".join(queries)\n # TODO\n\n def get(self, *args, **kwargs):\n \"\"\"\n Overload the standard get. This will limit itself to only\n return the current typeclass.\n\n Args:\n args (any): These are passed on as arguments to the default\n django get method.\n Keyword Args:\n kwargs (any): These are passed on as normal arguments\n to the default django get method\n Returns:\n object (object): The object found.\n\n Raises:\n ObjectNotFound: The exact name of this exception depends\n on the model base used.\n\n \"\"\"\n kwargs.update({\"db_typeclass_path\": self.model.path})\n return super().get(**kwargs)\n\n def filter(self, *args, **kwargs):\n \"\"\"\n Overload of the standard filter function. 
This filter will\n limit itself to only the current typeclass.\n\n Args:\n args (any): These are passed on as arguments to the default\n django filter method.\n Keyword Args:\n kwargs (any): These are passed on as normal arguments\n to the default django filter method.\n Returns:\n objects (queryset): The objects found.\n\n \"\"\"\n kwargs.update({\"db_typeclass_path\": self.model.path})\n return super().filter(*args, **kwargs)\n\n def all(self):\n \"\"\"\n Overload method to return all matches, filtering for typeclass.\n\n Returns:\n objects (queryset): The objects found.\n\n \"\"\"\n return super().all().filter(db_typeclass_path=self.model.path)\n\n def first(self):\n \"\"\"\n Overload method to return first match, filtering for typeclass.\n\n Returns:\n object (object): The object found.\n\n Raises:\n ObjectNotFound: The exact name of this exception depends\n on the model base used.\n\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).first()\n\n def last(self):\n \"\"\"\n Overload method to return last match, filtering for typeclass.\n\n Returns:\n object (object): The object found.\n\n Raises:\n ObjectNotFound: The exact name of this exception depends\n on the model base used.\n\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).last()\n\n def count(self):\n \"\"\"\n Overload method to return number of matches, filtering for typeclass.\n\n Returns:\n integer : Number of objects found.\n\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).count()\n\n def annotate(self, *args, **kwargs):\n \"\"\"\n Overload annotate method to filter on typeclass before annotating.\n Args:\n *args (any): Positional arguments passed along to queryset annotate method.\n **kwargs (any): Keyword arguments passed along to queryset annotate method.\n\n Returns:\n Annotated queryset.\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).annotate(*args, **kwargs)\n\n def values(self, *args, **kwargs):\n \"\"\"\n Overload values method to filter on typeclass first.\n Args:\n *args (any): Positional arguments passed along to values method.\n **kwargs (any): Keyword arguments passed along to values method.\n\n Returns:\n Queryset of values dictionaries, just filtered by typeclass first.\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).values(*args, **kwargs)\n\n def values_list(self, *args, **kwargs):\n \"\"\"\n Overload values method to filter on typeclass first.\n Args:\n *args (any): Positional arguments passed along to values_list method.\n **kwargs (any): Keyword arguments passed along to values_list method.\n\n Returns:\n Queryset of value_list tuples, just filtered by typeclass first.\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).values_list(*args, **kwargs)\n\n def _get_subclasses(self, cls):\n \"\"\"\n Recursively get all subclasses to a class.\n\n Args:\n cls (classoject): A class to get subclasses from.\n \"\"\"\n all_subclasses = cls.__subclasses__()\n for subclass in all_subclasses:\n all_subclasses.extend(self._get_subclasses(subclass))\n return all_subclasses\n\n def get_family(self, *args, **kwargs):\n \"\"\"\n Variation of get that not only returns the current typeclass\n but also all subclasses of that typeclass.\n\n Keyword Args:\n kwargs (any): These are passed on as normal arguments\n to the default django get method.\n Returns:\n objects (list): The objects found.\n\n Raises:\n ObjectNotFound: The exact name of this exception depends\n on the model base used.\n\n \"\"\"\n paths = 
[self.model.path] + [\n \"%s.%s\" % (cls.__module__, cls.__name__) for cls in self._get_subclasses(self.model)\n ]\n kwargs.update({\"db_typeclass_path__in\": paths})\n return super().get(*args, **kwargs)\n\n def filter_family(self, *args, **kwargs):\n \"\"\"\n Variation of filter that allows results both from typeclass\n and from subclasses of typeclass\n\n Args:\n args (any): These are passed on as arguments to the default\n django filter method.\n Keyword Args:\n kwargs (any): These are passed on as normal arguments\n to the default django filter method.\n Returns:\n objects (list): The objects found.\n\n \"\"\"\n # query, including all subclasses\n paths = [self.model.path] + [\n \"%s.%s\" % (cls.__module__, cls.__name__) for cls in self._get_subclasses(self.model)\n ]\n kwargs.update({\"db_typeclass_path__in\": paths})\n return super().filter(*args, **kwargs)\n\n def all_family(self):\n \"\"\"\n Return all matches, allowing matches from all subclasses of\n the typeclass.\n\n Returns:\n objects (list): The objects found.\n\n \"\"\"\n paths = [self.model.path] + [\n \"%s.%s\" % (cls.__module__, cls.__name__) for cls in self._get_subclasses(self.model)\n ]\n return super().all().filter(db_typeclass_path__in=paths)\n", "path": "evennia/typeclasses/managers.py" } ]
[ { "content": "\"\"\"\nThis implements the common managers that are used by the\nabstract models in dbobjects.py (and which are thus shared by\nall Attributes and TypedObjects).\n\n\"\"\"\nimport shlex\nfrom django.db.models import F, Q, Count, ExpressionWrapper, FloatField\nfrom django.db.models.functions import Cast\nfrom evennia.utils import idmapper\nfrom evennia.utils.utils import make_iter, variable_from_module\nfrom evennia.typeclasses.attributes import Attribute\nfrom evennia.typeclasses.tags import Tag\n\n__all__ = (\"TypedObjectManager\",)\n_GA = object.__getattribute__\n_Tag = None\n\n\n# Managers\n\n\nclass TypedObjectManager(idmapper.manager.SharedMemoryManager):\n \"\"\"\n Common ObjectManager for all dbobjects.\n\n \"\"\"\n\n # common methods for all typed managers. These are used\n # in other methods. Returns querysets.\n\n # Attribute manager methods\n def get_attribute(\n self, key=None, category=None, value=None, strvalue=None, obj=None, attrtype=None, **kwargs\n ):\n \"\"\"\n Return Attribute objects by key, by category, by value, by strvalue, by\n object (it is stored on) or with a combination of those criteria.\n\n Args:\n key (str, optional): The attribute's key to search for\n category (str, optional): The category of the attribute(s) to search for.\n value (str, optional): The attribute value to search for.\n Note that this is not a very efficient operation since it\n will query for a pickled entity. Mutually exclusive to\n `strvalue`.\n strvalue (str, optional): The str-value to search for.\n Most Attributes will not have strvalue set. This is\n mutually exclusive to the `value` keyword and will take\n precedence if given.\n obj (Object, optional): On which object the Attribute to\n search for is.\n attrype (str, optional): An attribute-type to search for.\n By default this is either `None` (normal Attributes) or\n `\"nick\"`.\n **kwargs (any): Currently unused. Reserved for future use.\n\n Returns:\n list: The matching Attributes.\n\n \"\"\"\n dbmodel = self.model.__dbclass__.__name__.lower()\n query = [(\"attribute__db_attrtype\", attrtype), (\"attribute__db_model\", dbmodel)]\n if obj:\n query.append((\"%s__id\" % self.model.__dbclass__.__name__.lower(), obj.id))\n if key:\n query.append((\"attribute__db_key\", key))\n if category:\n query.append((\"attribute__db_category\", category))\n if strvalue:\n query.append((\"attribute__db_strvalue\", strvalue))\n if value:\n # no reason to make strvalue/value mutually exclusive at this level\n query.append((\"attribute__db_value\", value))\n return Attribute.objects.filter(\n pk__in=self.model.db_attributes.through.objects.filter(**dict(query)).values_list(\n \"attribute_id\", flat=True\n )\n )\n\n def get_nick(self, key=None, category=None, value=None, strvalue=None, obj=None):\n \"\"\"\n Get a nick, in parallel to `get_attribute`.\n\n Args:\n key (str, optional): The nicks's key to search for\n category (str, optional): The category of the nicks(s) to search for.\n value (str, optional): The attribute value to search for. Note that this\n is not a very efficient operation since it will query for a pickled\n entity. Mutually exclusive to `strvalue`.\n strvalue (str, optional): The str-value to search for. Most Attributes\n will not have strvalue set. 
This is mutually exclusive to the `value`\n keyword and will take precedence if given.\n obj (Object, optional): On which object the Attribute to search for is.\n\n Returns:\n nicks (list): The matching Nicks.\n\n \"\"\"\n return self.get_attribute(\n key=key, category=category, value=value, strvalue=strvalue, obj=obj\n )\n\n def get_by_attribute(\n self, key=None, category=None, value=None, strvalue=None, attrtype=None, **kwargs\n ):\n \"\"\"\n Return objects having attributes with the given key, category,\n value, strvalue or combination of those criteria.\n\n Args:\n key (str, optional): The attribute's key to search for\n category (str, optional): The category of the attribute\n to search for.\n value (str, optional): The attribute value to search for.\n Note that this is not a very efficient operation since it\n will query for a pickled entity. Mutually exclusive to\n `strvalue`.\n strvalue (str, optional): The str-value to search for.\n Most Attributes will not have strvalue set. This is\n mutually exclusive to the `value` keyword and will take\n precedence if given.\n attrype (str, optional): An attribute-type to search for.\n By default this is either `None` (normal Attributes) or\n `\"nick\"`.\n kwargs (any): Currently unused. Reserved for future use.\n\n Returns:\n obj (list): Objects having the matching Attributes.\n\n \"\"\"\n dbmodel = self.model.__dbclass__.__name__.lower()\n query = [\n (\"db_attributes__db_attrtype\", attrtype),\n (\"db_attributes__db_model\", dbmodel),\n ]\n if key:\n query.append((\"db_attributes__db_key\", key))\n if category:\n query.append((\"db_attributes__db_category\", category))\n if strvalue:\n query.append((\"db_attributes__db_strvalue\", strvalue))\n elif value:\n # strvalue and value are mutually exclusive\n query.append((\"db_attributes__db_value\", value))\n return self.filter(**dict(query))\n\n def get_by_nick(self, key=None, nick=None, category=\"inputline\"):\n \"\"\"\n Get object based on its key or nick.\n\n Args:\n key (str, optional): The attribute's key to search for\n nick (str, optional): The nickname to search for\n category (str, optional): The category of the nick\n to search for.\n\n Returns:\n obj (list): Objects having the matching Nicks.\n\n \"\"\"\n return self.get_by_attribute(key=key, category=category, strvalue=nick, attrtype=\"nick\")\n\n # Tag manager methods\n\n def get_tag(self, key=None, category=None, obj=None, tagtype=None, global_search=False):\n \"\"\"\n Return Tag objects by key, by category, by object (it is\n stored on) or with a combination of those criteria.\n\n Args:\n key (str, optional): The Tag's key to search for\n category (str, optional): The Tag of the attribute(s)\n to search for.\n obj (Object, optional): On which object the Tag to\n search for is.\n tagtype (str, optional): One of `None` (normal tags),\n \"alias\" or \"permission\"\n global_search (bool, optional): Include all possible tags,\n not just tags on this object\n\n Returns:\n tag (list): The matching Tags.\n\n \"\"\"\n global _Tag\n if not _Tag:\n from evennia.typeclasses.models import Tag as _Tag\n dbmodel = self.model.__dbclass__.__name__.lower()\n if global_search:\n # search all tags using the Tag model\n query = [(\"db_tagtype\", tagtype), (\"db_model\", dbmodel)]\n if obj:\n query.append((\"id\", obj.id))\n if key:\n query.append((\"db_key\", key))\n if category:\n query.append((\"db_category\", category))\n return _Tag.objects.filter(**dict(query))\n else:\n # search only among tags stored on on this model\n query = 
[(\"tag__db_tagtype\", tagtype), (\"tag__db_model\", dbmodel)]\n if obj:\n query.append((\"%s__id\" % self.model.__name__.lower(), obj.id))\n if key:\n query.append((\"tag__db_key\", key))\n if category:\n query.append((\"tag__db_category\", category))\n return Tag.objects.filter(\n pk__in=self.model.db_tags.through.objects.filter(**dict(query)).values_list(\n \"tag_id\", flat=True\n )\n )\n\n def get_permission(self, key=None, category=None, obj=None):\n \"\"\"\n Get a permission from the database.\n\n Args:\n key (str, optional): The permission's identifier.\n category (str, optional): The permission's category.\n obj (object, optional): The object on which this Tag is set.\n\n Returns:\n permission (list): Permission objects.\n\n \"\"\"\n return self.get_tag(key=key, category=category, obj=obj, tagtype=\"permission\")\n\n def get_alias(self, key=None, category=None, obj=None):\n \"\"\"\n Get an alias from the database.\n\n Args:\n key (str, optional): The permission's identifier.\n category (str, optional): The permission's category.\n obj (object, optional): The object on which this Tag is set.\n\n Returns:\n alias (list): Alias objects.\n\n \"\"\"\n return self.get_tag(key=key, category=category, obj=obj, tagtype=\"alias\")\n\n def get_by_tag(self, key=None, category=None, tagtype=None, **kwargs):\n \"\"\"\n Return objects having tags with a given key or category or combination of the two.\n Also accepts multiple tags/category/tagtype\n\n Args:\n key (str or list, optional): Tag key or list of keys. Not case sensitive.\n category (str or list, optional): Tag category. Not case sensitive.\n If `key` is a list, a single category can either apply to all\n keys in that list or this must be a list matching the `key`\n list element by element. If no `key` is given, all objects with\n tags of this category are returned.\n tagtype (str, optional): 'type' of Tag, by default\n this is either `None` (a normal Tag), `alias` or\n `permission`. This always apply to all queried tags.\n\n Keyword Args:\n match (str): \"all\" (default) or \"any\"; determines whether the\n target object must be tagged with ALL of the provided\n tags/categories or ANY single one. 
ANY will perform a weighted\n sort, so objects with more tag matches will outrank those with\n fewer tag matches.\n\n Returns:\n objects (list): Objects with matching tag.\n\n Raises:\n IndexError: If `key` and `category` are both lists and `category` is shorter\n than `key`.\n\n \"\"\"\n if not (key or category):\n return []\n\n global _Tag\n if not _Tag:\n from evennia.typeclasses.models import Tag as _Tag\n\n anymatch = \"any\" == kwargs.get(\"match\", \"all\").lower().strip()\n\n keys = make_iter(key) if key else []\n categories = make_iter(category) if category else []\n n_keys = len(keys)\n n_categories = len(categories)\n unique_categories = set(categories)\n n_unique_categories = len(unique_categories)\n\n dbmodel = self.model.__dbclass__.__name__.lower()\n query = (\n self.filter(db_tags__db_tagtype__iexact=tagtype, db_tags__db_model__iexact=dbmodel)\n .distinct()\n .order_by(\"id\")\n )\n\n if n_keys > 0:\n # keys and/or categories given\n if n_categories == 0:\n categories = [None for _ in range(n_keys)]\n elif n_categories == 1 and n_keys > 1:\n cat = categories[0]\n categories = [cat for _ in range(n_keys)]\n elif 1 < n_categories < n_keys:\n raise IndexError(\n \"get_by_tag needs a single category or a list of categories \"\n \"the same length as the list of tags.\"\n )\n clauses = Q()\n for ikey, key in enumerate(keys):\n # ANY mode; must match any one of the given tags/categories\n clauses |= Q(db_key__iexact=key, db_category__iexact=categories[ikey])\n else:\n # only one or more categories given\n clauses = Q()\n # ANY mode; must match any one of them\n for category in unique_categories:\n clauses |= Q(db_category__iexact=category)\n\n tags = _Tag.objects.filter(clauses)\n query = query.filter(db_tags__in=tags).annotate(\n matches=Count(\"db_tags__pk\", filter=Q(db_tags__in=tags), distinct=True)\n )\n\n if anymatch:\n # ANY: Match any single tag, ordered by weight\n query = query.order_by(\"-matches\")\n else:\n # Default ALL: Match all of the tags and optionally more\n n_req_tags = n_keys if n_keys > 0 else n_unique_categories\n query = query.filter(matches__gte=n_req_tags)\n\n return query\n\n def get_by_permission(self, key=None, category=None):\n \"\"\"\n Return objects having permissions with a given key or category or\n combination of the two.\n\n Args:\n key (str, optional): Permissions key. Not case sensitive.\n category (str, optional): Permission category. Not case sensitive.\n Returns:\n objects (list): Objects with matching permission.\n \"\"\"\n return self.get_by_tag(key=key, category=category, tagtype=\"permission\")\n\n def get_by_alias(self, key=None, category=None):\n \"\"\"\n Return objects having aliases with a given key or category or\n combination of the two.\n\n Args:\n key (str, optional): Alias key. Not case sensitive.\n category (str, optional): Alias category. Not case sensitive.\n Returns:\n objects (list): Objects with matching alias.\n \"\"\"\n return self.get_by_tag(key=key, category=category, tagtype=\"alias\")\n\n def create_tag(self, key=None, category=None, data=None, tagtype=None):\n \"\"\"\n Create a new Tag of the base type associated with this\n object. This makes sure to create case-insensitive tags.\n If the exact same tag configuration (key+category+tagtype+dbmodel)\n exists on the model, a new tag will not be created, but an old\n one returned.\n\n\n Args:\n key (str, optional): Tag key. Not case sensitive.\n category (str, optional): Tag category. 
Not case sensitive.\n data (str, optional): Extra information about the tag.\n tagtype (str or None, optional): 'type' of Tag, by default\n this is either `None` (a normal Tag), `alias` or\n `permission`.\n Notes:\n The `data` field is not part of the uniqueness of the tag:\n Setting `data` on an existing tag will overwrite the old\n data field. It is intended only as a way to carry\n information about the tag (like a help text), not to carry\n any information about the tagged objects themselves.\n\n \"\"\"\n data = str(data) if data is not None else None\n # try to get old tag\n\n dbmodel = self.model.__dbclass__.__name__.lower()\n tag = self.get_tag(key=key, category=category, tagtype=tagtype, global_search=True)\n if tag and data is not None:\n # get tag from list returned by get_tag\n tag = tag[0]\n # overload data on tag\n tag.db_data = data\n tag.save()\n elif not tag:\n # create a new tag\n global _Tag\n if not _Tag:\n from evennia.typeclasses.models import Tag as _Tag\n tag = _Tag.objects.create(\n db_key=key.strip().lower() if key is not None else None,\n db_category=category.strip().lower() if category and key is not None else None,\n db_data=data,\n db_model=dbmodel,\n db_tagtype=tagtype.strip().lower() if tagtype is not None else None,\n )\n tag.save()\n return make_iter(tag)[0]\n\n def dbref(self, dbref, reqhash=True):\n \"\"\"\n Determing if input is a valid dbref.\n\n Args:\n dbref (str or int): A possible dbref.\n reqhash (bool, optional): If the \"#\" is required for this\n to be considered a valid hash.\n\n Returns:\n dbref (int or None): The integer part of the dbref.\n\n Notes:\n Valid forms of dbref (database reference number) are\n either a string '#N' or an integer N.\n\n \"\"\"\n if reqhash and not (isinstance(dbref, str) and dbref.startswith(\"#\")):\n return None\n if isinstance(dbref, str):\n dbref = dbref.lstrip(\"#\")\n try:\n if int(dbref) < 0:\n return None\n except Exception:\n return None\n return dbref\n\n def get_id(self, dbref):\n \"\"\"\n Find object with given dbref.\n\n Args:\n dbref (str or int): The id to search for.\n\n Returns:\n object (TypedObject): The matched object.\n\n \"\"\"\n dbref = self.dbref(dbref, reqhash=False)\n try:\n return self.get(id=dbref)\n except self.model.DoesNotExist:\n pass\n return None\n\n def dbref_search(self, dbref):\n \"\"\"\n Alias to get_id.\n\n Args:\n dbref (str or int): The id to search for.\n\n Returns:\n Queryset: Queryset with 0 or 1 match.\n\n \"\"\"\n dbref = self.dbref(dbref, reqhash=False)\n if dbref:\n return self.filter(id=dbref)\n return self.none()\n\n def get_dbref_range(self, min_dbref=None, max_dbref=None):\n \"\"\"\n Get objects within a certain range of dbrefs.\n\n Args:\n min_dbref (int): Start of dbref range.\n max_dbref (int): End of dbref range (inclusive)\n\n Returns:\n objects (list): TypedObjects with dbrefs within\n the given dbref ranges.\n\n \"\"\"\n retval = super().all()\n if min_dbref is not None:\n retval = retval.filter(id__gte=self.dbref(min_dbref, reqhash=False))\n if max_dbref is not None:\n retval = retval.filter(id__lte=self.dbref(max_dbref, reqhash=False))\n return retval\n\n def get_typeclass_totals(self, *args, **kwargs) -> object:\n \"\"\"\n Returns a queryset of typeclass composition statistics.\n\n Returns:\n qs (Queryset): A queryset of dicts containing the typeclass (name),\n the count of objects with that typeclass and a float representing\n the percentage of objects associated with the typeclass.\n\n \"\"\"\n return (\n self.values(\"db_typeclass_path\")\n 
.distinct()\n .annotate(\n # Get count of how many objects for each typeclass exist\n count=Count(\"db_typeclass_path\")\n )\n .annotate(\n # Rename db_typeclass_path field to something more human\n typeclass=F(\"db_typeclass_path\"),\n # Calculate this class' percentage of total composition\n percent=ExpressionWrapper(\n ((F(\"count\") / float(self.count())) * 100.0),\n output_field=FloatField(),\n ),\n )\n .values(\"typeclass\", \"count\", \"percent\")\n )\n\n def object_totals(self):\n \"\"\"\n Get info about database statistics.\n\n Returns:\n census (dict): A dictionary `{typeclass_path: number, ...}` with\n all the typeclasses active in-game as well as the number\n of such objects defined (i.e. the number of database\n object having that typeclass set on themselves).\n\n \"\"\"\n stats = self.get_typeclass_totals().order_by(\"typeclass\")\n return {x.get(\"typeclass\"): x.get(\"count\") for x in stats}\n\n def typeclass_search(self, typeclass, include_children=False, include_parents=False):\n \"\"\"\n Searches through all objects returning those which has a\n certain typeclass. If location is set, limit search to objects\n in that location.\n\n Args:\n typeclass (str or class): A typeclass class or a python path to a typeclass.\n include_children (bool, optional): Return objects with\n given typeclass *and* all children inheriting from this\n typeclass. Mutuall exclusive to `include_parents`.\n include_parents (bool, optional): Return objects with\n given typeclass *and* all parents to this typeclass.\n Mutually exclusive to `include_children`.\n\n Returns:\n objects (list): The objects found with the given typeclasses.\n\n \"\"\"\n\n if callable(typeclass):\n cls = typeclass.__class__\n typeclass = \"%s.%s\" % (cls.__module__, cls.__name__)\n elif not isinstance(typeclass, str) and hasattr(typeclass, \"path\"):\n typeclass = typeclass.path\n\n # query objects of exact typeclass\n query = Q(db_typeclass_path__exact=typeclass)\n\n if include_children:\n # build requests for child typeclass objects\n clsmodule, clsname = typeclass.rsplit(\".\", 1)\n cls = variable_from_module(clsmodule, clsname)\n subclasses = cls.__subclasses__()\n if subclasses:\n for child in (child for child in subclasses if hasattr(child, \"path\")):\n query = query | Q(db_typeclass_path__exact=child.path)\n elif include_parents:\n # build requests for parent typeclass objects\n clsmodule, clsname = typeclass.rsplit(\".\", 1)\n cls = variable_from_module(clsmodule, clsname)\n parents = cls.__mro__\n if parents:\n for parent in (parent for parent in parents if hasattr(parent, \"path\")):\n query = query | Q(db_typeclass_path__exact=parent.path)\n # actually query the database\n return super().filter(query)\n\n\nclass TypeclassManager(TypedObjectManager):\n \"\"\"\n Manager for the typeclasses. The main purpose of this manager is\n to limit database queries to the given typeclass despite all\n typeclasses technically being defined in the same core database\n model.\n\n \"\"\"\n\n # object-manager methods\n def smart_search(self, query):\n \"\"\"\n Search by supplying a string with optional extra search criteria to aid the query.\n\n Args:\n query (str): A search criteria that accepts extra search criteria on the following\n forms:\n\n [key|alias|#dbref...]\n [tag==<tagstr>[:category]...]\n [attr==<key>:<value>:category...]\n\n All three can be combined in the same query, separated by spaces.\n\n Returns:\n matches (queryset): A queryset result matching all queries exactly. 
If wanting to use\n spaces or ==, != in tags or attributes, enclose them in quotes.\n\n Example:\n house = smart_search(\"key=foo alias=bar tag=house:building tag=magic attr=color:red\")\n\n Note:\n The flexibility of this method is limited by the input line format. Tag/attribute\n matching only works for matching primitives. For even more complex queries, such as\n 'in' operations or object field matching, use the full django query language.\n\n \"\"\"\n # shlex splits by spaces unless escaped by quotes\n querysplit = shlex.split(query)\n queries, plustags, plusattrs, negtags, negattrs = [], [], [], [], []\n for ipart, part in enumerate(querysplit):\n key, rest = part, \"\"\n if \":\" in part:\n key, rest = part.split(\":\", 1)\n # tags are on the form tag or tag:category\n if key.startswith(\"tag==\"):\n plustags.append((key[5:], rest))\n continue\n elif key.startswith(\"tag!=\"):\n negtags.append((key[5:], rest))\n continue\n # attrs are on the form attr:value or attr:value:category\n elif rest:\n value, category = rest, \"\"\n if \":\" in rest:\n value, category = rest.split(\":\", 1)\n if key.startswith(\"attr==\"):\n plusattrs.append((key[7:], value, category))\n continue\n elif key.startswith(\"attr!=\"):\n negattrs.append((key[7:], value, category))\n continue\n # if we get here, we are entering a key search criterion which\n # we assume is one word.\n queries.append(part)\n # build query from components\n query = \" \".join(queries)\n # TODO\n\n def get(self, *args, **kwargs):\n \"\"\"\n Overload the standard get. This will limit itself to only\n return the current typeclass.\n\n Args:\n args (any): These are passed on as arguments to the default\n django get method.\n Keyword Args:\n kwargs (any): These are passed on as normal arguments\n to the default django get method\n Returns:\n object (object): The object found.\n\n Raises:\n ObjectNotFound: The exact name of this exception depends\n on the model base used.\n\n \"\"\"\n kwargs.update({\"db_typeclass_path\": self.model.path})\n return super().get(**kwargs)\n\n def filter(self, *args, **kwargs):\n \"\"\"\n Overload of the standard filter function. 
This filter will\n limit itself to only the current typeclass.\n\n Args:\n args (any): These are passed on as arguments to the default\n django filter method.\n Keyword Args:\n kwargs (any): These are passed on as normal arguments\n to the default django filter method.\n Returns:\n objects (queryset): The objects found.\n\n \"\"\"\n kwargs.update({\"db_typeclass_path\": self.model.path})\n return super().filter(*args, **kwargs)\n\n def all(self):\n \"\"\"\n Overload method to return all matches, filtering for typeclass.\n\n Returns:\n objects (queryset): The objects found.\n\n \"\"\"\n return super().all().filter(db_typeclass_path=self.model.path)\n\n def first(self):\n \"\"\"\n Overload method to return first match, filtering for typeclass.\n\n Returns:\n object (object): The object found.\n\n Raises:\n ObjectNotFound: The exact name of this exception depends\n on the model base used.\n\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).first()\n\n def last(self):\n \"\"\"\n Overload method to return last match, filtering for typeclass.\n\n Returns:\n object (object): The object found.\n\n Raises:\n ObjectNotFound: The exact name of this exception depends\n on the model base used.\n\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).last()\n\n def count(self):\n \"\"\"\n Overload method to return number of matches, filtering for typeclass.\n\n Returns:\n integer : Number of objects found.\n\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).count()\n\n def annotate(self, *args, **kwargs):\n \"\"\"\n Overload annotate method to filter on typeclass before annotating.\n Args:\n *args (any): Positional arguments passed along to queryset annotate method.\n **kwargs (any): Keyword arguments passed along to queryset annotate method.\n\n Returns:\n Annotated queryset.\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).annotate(*args, **kwargs)\n\n def values(self, *args, **kwargs):\n \"\"\"\n Overload values method to filter on typeclass first.\n Args:\n *args (any): Positional arguments passed along to values method.\n **kwargs (any): Keyword arguments passed along to values method.\n\n Returns:\n Queryset of values dictionaries, just filtered by typeclass first.\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).values(*args, **kwargs)\n\n def values_list(self, *args, **kwargs):\n \"\"\"\n Overload values method to filter on typeclass first.\n Args:\n *args (any): Positional arguments passed along to values_list method.\n **kwargs (any): Keyword arguments passed along to values_list method.\n\n Returns:\n Queryset of value_list tuples, just filtered by typeclass first.\n \"\"\"\n return super().filter(db_typeclass_path=self.model.path).values_list(*args, **kwargs)\n\n def _get_subclasses(self, cls):\n \"\"\"\n Recursively get all subclasses to a class.\n\n Args:\n cls (classoject): A class to get subclasses from.\n \"\"\"\n all_subclasses = cls.__subclasses__()\n for subclass in all_subclasses:\n all_subclasses.extend(self._get_subclasses(subclass))\n return all_subclasses\n\n def get_family(self, *args, **kwargs):\n \"\"\"\n Variation of get that not only returns the current typeclass\n but also all subclasses of that typeclass.\n\n Keyword Args:\n kwargs (any): These are passed on as normal arguments\n to the default django get method.\n Returns:\n objects (list): The objects found.\n\n Raises:\n ObjectNotFound: The exact name of this exception depends\n on the model base used.\n\n \"\"\"\n paths = 
[self.model.path] + [\n \"%s.%s\" % (cls.__module__, cls.__name__) for cls in self._get_subclasses(self.model)\n ]\n kwargs.update({\"db_typeclass_path__in\": paths})\n return super().get(*args, **kwargs)\n\n def filter_family(self, *args, **kwargs):\n \"\"\"\n Variation of filter that allows results both from typeclass\n and from subclasses of typeclass\n\n Args:\n args (any): These are passed on as arguments to the default\n django filter method.\n Keyword Args:\n kwargs (any): These are passed on as normal arguments\n to the default django filter method.\n Returns:\n objects (list): The objects found.\n\n \"\"\"\n # query, including all subclasses\n paths = [self.model.path] + [\n \"%s.%s\" % (cls.__module__, cls.__name__) for cls in self._get_subclasses(self.model)\n ]\n kwargs.update({\"db_typeclass_path__in\": paths})\n return super().filter(*args, **kwargs)\n\n def all_family(self):\n \"\"\"\n Return all matches, allowing matches from all subclasses of\n the typeclass.\n\n Returns:\n objects (list): The objects found.\n\n \"\"\"\n paths = [self.model.path] + [\n \"%s.%s\" % (cls.__module__, cls.__name__) for cls in self._get_subclasses(self.model)\n ]\n return super().all().filter(db_typeclass_path__in=paths)\n", "path": "evennia/typeclasses/managers.py" } ]
diff --git a/evennia/typeclasses/managers.py b/evennia/typeclasses/managers.py
index c8ad666e234..e5d1130f123 100644
--- a/evennia/typeclasses/managers.py
+++ b/evennia/typeclasses/managers.py
@@ -286,7 +286,7 @@ def get_by_tag(self, key=None, category=None, tagtype=None, **kwargs):
         categories = make_iter(category) if category else []
         n_keys = len(keys)
         n_categories = len(categories)
-        unique_categories = sorted(set(categories))
+        unique_categories = set(categories)
         n_unique_categories = len(unique_categories)
 
         dbmodel = self.model.__dbclass__.__name__.lower()
diff --git a/evennia/typeclasses/tests.py b/evennia/typeclasses/tests.py
index aea6ce75185..90c4945898b 100644
--- a/evennia/typeclasses/tests.py
+++ b/evennia/typeclasses/tests.py
@@ -142,6 +142,13 @@ def test_get_tag_with_any(self):
             [self.obj1],
         )
 
+    def test_get_tag_with_any_including_nones(self):
+        self.obj1.tags.add("tagA", "categoryA")
+        self.assertEqual(
+            self._manager("get_by_tag", ["tagA", "tagB"], ["categoryA", "categoryB", None], match="any"),
+            [self.obj1],
+        )
+
     def test_get_tag_withnomatch(self):
         self.obj1.tags.add("tagC", "categoryC")
         self.assertEqual(
[BUG - Develop] `get_by_tag` category list fails on None

#### Describe the bug
When using a list for the `category` kwarg that contains an entry of `None` (which is a valid category), it fails with a traceback, presumably due to the use of `sorted` here: <https://github.com/evennia/evennia/blob/develop/evennia/typeclasses/managers.py#L289>

#### To Reproduce
Steps to reproduce the behavior:
1. Add tags to an object (e.g. a room), both with categories and without. Let's say:
```
tag here = test
tag here = tag1:cat1
tag here = tag2:cat2
```
2. `Room.objects.get_by_tag(key=["tag1","tag2"], category=["cat1","cat2"])` returns the current room.
3. `Room.objects.get_by_tag(key=["tag1","test"], category=["cat1",None])` _should_ return the current room as well, but instead will traceback.

#### Expected behavior
Since `None` is a valid tag category, the function should be capable of searching for tags with categories of strings _and_ of `None`.

#### Develop-branch commit
7f4769bd9

#### Additional context
Traceback:
```
File "./evennia/evennia/typeclasses/managers.py", line 289, in get_by_tag
    unique_categories = sorted(set(categories))
TypeError: '<' not supported between instances of 'NoneType' and 'str'
```
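A standalone sketch of the failure mode and the fix applied in the diff above, runnable in a plain Python shell; the variable names mirror `get_by_tag`, and only the `sorted`-versus-`set` behaviour is the point:

```python
# None is a valid tag category, so a category list may mix str and None.
categories = ["cat1", None]

# Old code path: sorted() compares elements pairwise, and str/None are
# not orderable in Python 3, so this raises the TypeError from the issue.
try:
    unique_categories = sorted(set(categories))
except TypeError as exc:
    print(exc)  # e.g. "'<' not supported between instances of 'NoneType' and 'str'"

# Fixed code path: get_by_tag only needs deduplication -- it takes len()
# of the result and iterates it to build Q-clauses -- so a plain set works.
unique_categories = set(categories)
n_unique_categories = len(unique_categories)  # 2
```

Since set iteration order is arbitrary, the OR-clauses built from `unique_categories` may come out in any order, which is harmless for a Q-filter; the ordering that `sorted` provided was never relied upon.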
openfun__richie-1960
[ { "content": "\"\"\"\nAPI endpoints for the courses app.\n\"\"\"\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom cms import signals as cms_signals\nfrom cms.models import Page\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import as_serializer_error\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom .exceptions import MissingResourceLinkError\nfrom .lms import LMSHandler\nfrom .models import Course, CourseRun, CourseRunSyncMode\nfrom .serializers import CourseRunSerializer\nfrom .utils import get_signature, normalize_code\n\n\nclass NotAllowed(BasePermission):\n \"\"\"\n Utility permission class to deny all requests. This is used as a default to close\n requests to unsupported actions.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Always deny permission.\n \"\"\"\n return False\n\n\nclass CourseRunsViewSet(ModelViewSet):\n \"\"\"\n API endpoints to access and perform actions on course runs.\n \"\"\"\n\n permission_classes = [NotAllowed]\n queryset = CourseRun.objects.all()\n serializer_class = CourseRunSerializer\n\n def get_permissions(self):\n \"\"\"\n Manage permissions for builtin DRF methods on ViewSets.\n \"\"\"\n if self.action == \"retrieve\":\n permission_classes = []\n else:\n try:\n permission_classes = getattr(self, self.action).kwargs.get(\n \"permission_classes\"\n )\n except AttributeError:\n permission_classes = self.permission_classes\n\n return [permission() for permission in permission_classes]\n\n\n# pylint: disable=too-many-locals,too-many-branches\ndef sync_course_run(data):\n \"\"\" \"\n Synchronize a course run from its data.\n\n Parameters\n ----------\n data : dict\n A dictionary describing a course run of the form:\n {\n \"resource_link\": \"http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/\",\n \"start\": \"2020-12-09T09:31:59.417817Z\",\n \"end\": \"2021-03-14T09:31:59.417895Z\",\n \"enrollment_start\": \"2020-11-09T09:31:59.417936Z\",\n \"enrollment_end\": \"2020-12-24T09:31:59.417972Z\",\n \"languages\": [\"en\", \"fr\"],\n \"enrollment_count\": 42,\n \"catalog_visibility\": \"course_and_search\",\n }\n\n Returns\n -------\n None or raises:\n MissingResourceLinkError: the data dictionary is missing a \"resource_link\" key\n ValidationError: something is wrong in the data. 
The error dict describes the error.\n\n \"\"\"\n # Select LMS from resource link\n resource_link = data.get(\"resource_link\")\n if not resource_link:\n raise MissingResourceLinkError()\n\n lms = LMSHandler.select_lms(resource_link)\n if lms is None:\n raise ValidationError(\n {\"resource_link\": [\"No LMS configuration found for this resource link.\"]}\n )\n sync_mode = lms.default_course_run_sync_mode\n\n target_course_runs = CourseRun.objects.filter(resource_link=resource_link)\n draft_course_runs = target_course_runs.filter(draft_course_run__isnull=True)\n\n # Clean data before instiating a serializer with it\n cleaned_data = lms.clean_course_run_data(data)\n serializer = lms.get_course_run_serializer(\n cleaned_data, partial=bool(draft_course_runs)\n )\n\n if serializer.is_valid() is not True:\n raise ValidationError(serializer.errors)\n validated_data = serializer.validated_data\n\n if draft_course_runs:\n # Remove fields that are protected for update\n validated_data = {\n key: value\n for (key, value) in validated_data.items()\n if key not in lms.configuration.get(\"COURSE_RUN_SYNC_NO_UPDATE_FIELDS\", [])\n }\n\n for course_run in draft_course_runs.filter(\n sync_mode__in=[\n CourseRunSyncMode.SYNC_TO_DRAFT,\n CourseRunSyncMode.SYNC_TO_PUBLIC,\n ]\n ):\n nb_updated = CourseRun.objects.filter(\n Q(pk=course_run.pk)\n | Q(\n draft_course_run__sync_mode=CourseRunSyncMode.SYNC_TO_PUBLIC,\n draft_course_run=course_run,\n )\n ).update(**validated_data)\n\n public_course = course_run.direct_course.public_extension\n if course_run.sync_mode == CourseRunSyncMode.SYNC_TO_PUBLIC:\n if public_course:\n # If the public course run did not exist yet it has to be created\n if nb_updated == 1:\n public_course.copy_relations(course_run.direct_course)\n\n # What we did has changed the public course page. We must reindex it\n cms_signals.post_publish.send(\n sender=Page,\n instance=course_run.direct_course.extended_object,\n language=None,\n )\n else:\n course_run.refresh_from_db()\n course_run.mark_course_dirty()\n return\n\n # We need to create a new course run\n if lms.default_course_run_sync_mode == CourseRunSyncMode.MANUAL:\n raise ValidationError(\n {\"resource_link\": [\"Unknown course run when creation is deactivated.\"]}\n )\n\n # Look for the course targeted by the resource link\n course_code = normalize_code(lms.extract_course_code(data))\n try:\n course = Course.objects.get(\n code=course_code,\n extended_object__publisher_is_draft=True,\n # Exclude snapshots\n extended_object__node__parent__cms_pages__course__isnull=True,\n )\n except Course.DoesNotExist as exc:\n # Create the course page in draft\n raise ValidationError(\n {\"resource_link\": [f\"Unknown course: {course_code:s}.\"]}\n ) from exc\n\n # Instantiate a new draft course run\n draft_course_run = CourseRun(\n direct_course=course, sync_mode=sync_mode, **validated_data\n )\n\n # Create the related public course run if necessary\n if sync_mode == CourseRunSyncMode.SYNC_TO_PUBLIC:\n # Don't mark the related course page dirty and directly add\n # the course run to the corresponding public course page\n draft_course_run.save()\n if course.public_extension_id:\n public_course_run = CourseRun(\n direct_course=course.public_extension,\n draft_course_run=draft_course_run,\n sync_mode=sync_mode,\n **validated_data,\n )\n public_course_run.save()\n\n # What we did has changed the public course page. 
We must reindex it\n cms_signals.post_publish.send(\n sender=Page, instance=course.extended_object, language=None\n )\n else:\n # Save the draft course run marking the course page dirty\n draft_course_run.save()\n draft_course_run.mark_course_dirty()\n\n\n# pylint: disable=too-many-return-statements,unused-argument, too-many-locals,too-many-branches\n@api_view([\"POST\"])\ndef sync_course_runs_from_request(request, version):\n \"\"\"View for the web hook to create or update course runs based on their resource link.\n\n - An existing course run is updated only if its \"sync_mode\" field is set to something else\n than \"manual\".\n\n - The public version of a course run is updated only if its \"sync_mode\" field is set to\n \"sync_to_public\". Otherwise, only the draft version is updated and the related course\n is marked dirty.\n\n - A new course run is created only if the \"DEFAULT_COURSE_RUN_SYNC_MODE\" parameter is set\n to something else than \"manual\" in the lms configuration (or the setting\n \"RICHIE_DEFAULT_COURSE_RUN_SYNC_MODE\" in the absence of LMS preference). Otherwise, only\n existing course runs are updated.\n\n - A new public course run is created only if the \"DEFAULT_COURSE_RUN_SYNC_MODE\" parameter\n is set to \"sync_to_public\" in the lms configuration (or the setting\n \"RICHIE_DEFAULT_COURSE_RUN_SYNC_MODE\" in the absence of LMS preference). Otherwise, only\n the draft course run is created and the related course is marked dirty.\n\n Parameters\n ----------\n request : Type[django.http.request.HttpRequest]\n The request on the API endpoint, it should contain a payload with course run fields.\n\n Returns\n -------\n Type[rest_framework.response.Response]\n HttpResponse acknowledging the success or failure of the synchronization operation.\n \"\"\"\n message = request.body.decode(\"utf-8\")\n\n # Check if the provided signature is valid against any secret in our list\n #\n # We need to do this to support 2 or more versions of our infrastructure at the same time.\n # It then enables us to do updates and change the secret without incurring downtime.\n authorization_header = request.headers.get(\"Authorization\")\n if not authorization_header:\n return Response(\"Missing authentication.\", status=403)\n\n signature_is_valid = any(\n authorization_header == get_signature(message, secret)\n for secret in getattr(settings, \"RICHIE_COURSE_RUN_SYNC_SECRETS\", [])\n )\n\n if not signature_is_valid:\n return Response(\"Invalid authentication.\", status=401)\n\n if isinstance(request.data, (list, tuple)):\n result = {}\n status = 200\n if not all(\"resource_link\" in d for d in request.data):\n return Response({\"resource_link\": [\"This field is required.\"]}, status=400)\n for data in request.data:\n try:\n sync_course_run(data)\n except ValidationError as error:\n result[data[\"resource_link\"]] = as_serializer_error(error)\n status = 400\n else:\n result[data[\"resource_link\"]] = {\"success\": True}\n return Response(result, status=status)\n\n try:\n sync_course_run(request.data)\n except MissingResourceLinkError:\n return Response({\"resource_link\": [\"This field is required.\"]}, status=400)\n\n return Response({\"success\": True})\n", "path": "src/richie/apps/courses/api.py" } ]
[ { "content": "\"\"\"\nAPI endpoints for the courses app.\n\"\"\"\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom cms import signals as cms_signals\nfrom cms.models import Page\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import as_serializer_error\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom .exceptions import MissingResourceLinkError\nfrom .lms import LMSHandler\nfrom .models import Course, CourseRun, CourseRunSyncMode\nfrom .serializers import CourseRunSerializer\nfrom .utils import get_signature, normalize_code\n\n\nclass NotAllowed(BasePermission):\n \"\"\"\n Utility permission class to deny all requests. This is used as a default to close\n requests to unsupported actions.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Always deny permission.\n \"\"\"\n return False\n\n\nclass CourseRunsViewSet(ModelViewSet):\n \"\"\"\n API endpoints to access and perform actions on course runs.\n \"\"\"\n\n permission_classes = [NotAllowed]\n queryset = CourseRun.objects.all()\n serializer_class = CourseRunSerializer\n\n def get_permissions(self):\n \"\"\"\n Manage permissions for builtin DRF methods on ViewSets.\n \"\"\"\n if self.action == \"retrieve\":\n permission_classes = []\n else:\n try:\n permission_classes = getattr(self, self.action).kwargs.get(\n \"permission_classes\"\n )\n except AttributeError:\n permission_classes = self.permission_classes\n\n return [permission() for permission in permission_classes]\n\n\n# pylint: disable=too-many-locals,too-many-branches\ndef sync_course_run(data):\n \"\"\" \"\n Synchronize a course run from its data.\n\n Parameters\n ----------\n data : dict\n A dictionary describing a course run of the form:\n {\n \"resource_link\": \"http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/\",\n \"start\": \"2020-12-09T09:31:59.417817Z\",\n \"end\": \"2021-03-14T09:31:59.417895Z\",\n \"enrollment_start\": \"2020-11-09T09:31:59.417936Z\",\n \"enrollment_end\": \"2020-12-24T09:31:59.417972Z\",\n \"languages\": [\"en\", \"fr\"],\n \"enrollment_count\": 42,\n \"catalog_visibility\": \"course_and_search\",\n }\n\n Returns\n -------\n None or raises:\n MissingResourceLinkError: the data dictionary is missing a \"resource_link\" key\n ValidationError: something is wrong in the data. 
The error dict describes the error.\n\n \"\"\"\n # Select LMS from resource link\n resource_link = data.get(\"resource_link\")\n if not resource_link:\n raise MissingResourceLinkError()\n\n lms = LMSHandler.select_lms(resource_link)\n if lms is None:\n raise ValidationError(\n {\"resource_link\": [\"No LMS configuration found for this resource link.\"]}\n )\n sync_mode = lms.default_course_run_sync_mode\n\n target_course_runs = CourseRun.objects.filter(resource_link=resource_link)\n draft_course_runs = target_course_runs.filter(draft_course_run__isnull=True)\n\n # Clean data before instiating a serializer with it\n cleaned_data = lms.clean_course_run_data(data)\n serializer = lms.get_course_run_serializer(\n cleaned_data, partial=bool(draft_course_runs)\n )\n\n if serializer.is_valid() is not True:\n raise ValidationError(serializer.errors)\n validated_data = serializer.validated_data\n\n if draft_course_runs:\n # Remove fields that are protected for update\n validated_data = {\n key: value\n for (key, value) in validated_data.items()\n if key not in lms.configuration.get(\"COURSE_RUN_SYNC_NO_UPDATE_FIELDS\", [])\n }\n\n for course_run in draft_course_runs.filter(\n sync_mode__in=[\n CourseRunSyncMode.SYNC_TO_DRAFT,\n CourseRunSyncMode.SYNC_TO_PUBLIC,\n ]\n ):\n nb_updated = CourseRun.objects.filter(\n Q(pk=course_run.pk)\n | Q(\n draft_course_run__sync_mode=CourseRunSyncMode.SYNC_TO_PUBLIC,\n draft_course_run=course_run,\n )\n ).update(**validated_data)\n\n public_course = course_run.direct_course.public_extension\n if course_run.sync_mode == CourseRunSyncMode.SYNC_TO_PUBLIC:\n if public_course:\n # If the public course run did not exist yet it has to be created\n if nb_updated == 1:\n public_course.copy_relations(course_run.direct_course)\n\n # What we did has changed the public course page. We must reindex it\n cms_signals.post_publish.send(\n sender=Page,\n instance=course_run.direct_course.extended_object,\n language=None,\n )\n else:\n course_run.refresh_from_db()\n course_run.mark_course_dirty()\n return\n\n # We need to create a new course run\n if lms.default_course_run_sync_mode == CourseRunSyncMode.MANUAL:\n raise ValidationError(\n {\"resource_link\": [\"Unknown course run when creation is deactivated.\"]}\n )\n\n # Look for the course targeted by the resource link\n course_code = normalize_code(lms.extract_course_code(data))\n try:\n course = Course.objects.distinct().get(\n code=course_code,\n extended_object__publisher_is_draft=True,\n # Exclude snapshots\n extended_object__node__parent__cms_pages__course__isnull=True,\n )\n except Course.DoesNotExist as exc:\n # Create the course page in draft\n raise ValidationError(\n {\"resource_link\": [f\"Unknown course: {course_code:s}.\"]}\n ) from exc\n\n # Instantiate a new draft course run\n draft_course_run = CourseRun(\n direct_course=course, sync_mode=sync_mode, **validated_data\n )\n\n # Create the related public course run if necessary\n if sync_mode == CourseRunSyncMode.SYNC_TO_PUBLIC:\n # Don't mark the related course page dirty and directly add\n # the course run to the corresponding public course page\n draft_course_run.save()\n if course.public_extension_id:\n public_course_run = CourseRun(\n direct_course=course.public_extension,\n draft_course_run=draft_course_run,\n sync_mode=sync_mode,\n **validated_data,\n )\n public_course_run.save()\n\n # What we did has changed the public course page. 
We must reindex it\n cms_signals.post_publish.send(\n sender=Page, instance=course.extended_object, language=None\n )\n else:\n # Save the draft course run marking the course page dirty\n draft_course_run.save()\n draft_course_run.mark_course_dirty()\n\n\n# pylint: disable=too-many-return-statements,unused-argument, too-many-locals,too-many-branches\n@api_view([\"POST\"])\ndef sync_course_runs_from_request(request, version):\n \"\"\"View for the web hook to create or update course runs based on their resource link.\n\n - An existing course run is updated only if its \"sync_mode\" field is set to something else\n than \"manual\".\n\n - The public version of a course run is updated only if its \"sync_mode\" field is set to\n \"sync_to_public\". Otherwise, only the draft version is updated and the related course\n is marked dirty.\n\n - A new course run is created only if the \"DEFAULT_COURSE_RUN_SYNC_MODE\" parameter is set\n to something else than \"manual\" in the lms configuration (or the setting\n \"RICHIE_DEFAULT_COURSE_RUN_SYNC_MODE\" in the absence of LMS preference). Otherwise, only\n existing course runs are updated.\n\n - A new public course run is created only if the \"DEFAULT_COURSE_RUN_SYNC_MODE\" parameter\n is set to \"sync_to_public\" in the lms configuration (or the setting\n \"RICHIE_DEFAULT_COURSE_RUN_SYNC_MODE\" in the absence of LMS preference). Otherwise, only\n the draft course run is created and the related course is marked dirty.\n\n Parameters\n ----------\n request : Type[django.http.request.HttpRequest]\n The request on the API endpoint, it should contain a payload with course run fields.\n\n Returns\n -------\n Type[rest_framework.response.Response]\n HttpResponse acknowledging the success or failure of the synchronization operation.\n \"\"\"\n message = request.body.decode(\"utf-8\")\n\n # Check if the provided signature is valid against any secret in our list\n #\n # We need to do this to support 2 or more versions of our infrastructure at the same time.\n # It then enables us to do updates and change the secret without incurring downtime.\n authorization_header = request.headers.get(\"Authorization\")\n if not authorization_header:\n return Response(\"Missing authentication.\", status=403)\n\n signature_is_valid = any(\n authorization_header == get_signature(message, secret)\n for secret in getattr(settings, \"RICHIE_COURSE_RUN_SYNC_SECRETS\", [])\n )\n\n if not signature_is_valid:\n return Response(\"Invalid authentication.\", status=401)\n\n if isinstance(request.data, (list, tuple)):\n result = {}\n status = 200\n if not all(\"resource_link\" in d for d in request.data):\n return Response({\"resource_link\": [\"This field is required.\"]}, status=400)\n for data in request.data:\n try:\n sync_course_run(data)\n except ValidationError as error:\n result[data[\"resource_link\"]] = as_serializer_error(error)\n status = 400\n else:\n result[data[\"resource_link\"]] = {\"success\": True}\n return Response(result, status=status)\n\n try:\n sync_course_run(request.data)\n except MissingResourceLinkError:\n return Response({\"resource_link\": [\"This field is required.\"]}, status=400)\n\n return Response({\"success\": True})\n", "path": "src/richie/apps/courses/api.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index ecda16668b..eba4e14cf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,11 @@ Versioning](https://semver.org/spec/v2.0.0.html). ## [Unrealeased] +### Fixed + +- Fix error `MultipleObjectsReturned` during synchronization due to missing + `distinct`. + ## [2.21.1] - 2023-04-04 ### Fixed diff --git a/src/richie/apps/courses/api.py b/src/richie/apps/courses/api.py index f4dd83c077..16ebc16d9b 100644 --- a/src/richie/apps/courses/api.py +++ b/src/richie/apps/courses/api.py @@ -160,7 +160,7 @@ def sync_course_run(data): # Look for the course targeted by the resource link course_code = normalize_code(lms.extract_course_code(data)) try: - course = Course.objects.get( + course = Course.objects.distinct().get( code=course_code, extended_object__publisher_is_draft=True, # Exclude snapshots diff --git a/tests/apps/courses/test_api_course_run_sync.py b/tests/apps/courses/test_api_course_run_sync.py index bee474f743..e85948e845 100644 --- a/tests/apps/courses/test_api_course_run_sync.py +++ b/tests/apps/courses/test_api_course_run_sync.py @@ -12,6 +12,7 @@ from cms.signals import post_publish from cms.test_utils.testcases import CMSTestCase +from richie.apps.core.helpers import create_i18n_page from richie.apps.courses.factories import CourseFactory, CourseRunFactory from richie.apps.courses.models import Course, CourseRun from richie.apps.courses.serializers import SyncCourseRunSerializer @@ -1258,3 +1259,82 @@ def test_api_course_run_sync_create_course_run_on_course_page_with_snapshot( self.assertEqual(response.status_code, 200) self.assertEqual(response.json(), {"success": True}) self.assertEqual(CourseRun.objects.count(), 1) + + def test_api_course_run_sync_create_course_run_on_published_course_page( + self, mock_signal + ): + """ + When a course run is created on a published course page, the course run should + be created successfully. + """ + CourseFactory(code="DemoX", should_publish=True) + + # Two courses with the same code should be created + Course.objects.get(code="DEMOX", extended_object__publisher_is_draft=True) + Course.objects.get(code="DEMOX", extended_object__publisher_is_draft=False) + self.assertEqual(CourseRun.objects.count(), 0) + + data = { + "resource_link": "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/", + "start": "2020-12-09T09:31:59.417817Z", + "end": "2021-03-14T09:31:59.417895Z", + "enrollment_start": "2020-11-09T09:31:59.417936Z", + "enrollment_end": "2020-12-24T09:31:59.417972Z", + "languages": ["en", "fr"], + "enrollment_count": 46782, + "catalog_visibility": "course_and_search", + } + + mock_signal.reset_mock() + + authorization = ( + "SIG-HMAC-SHA256 " + "5bdfb326b35fccaef9961e03cf617c359c86ffbb6c64e0f7e074aa011e8af9d6" + ) + response = self.client.post( + "/api/v1.0/course-runs-sync", + data, + content_type="application/json", + HTTP_AUTHORIZATION=authorization, + ) + + self.assertEqual(response.status_code, 200) + self.assertEqual(response.json(), {"success": True}) + self.assertEqual(CourseRun.objects.count(), 2) + + def test_api_course_with_parent_courses_page(self, mock_signal): + """ + Verify that a course is updated when it has a parent courses page. 
+ """ + mock_signal.reset_mock() + courses_page = create_i18n_page( + "Courses page", + published=True, + reverse_id="courses", + ) + CourseFactory(code="DemoX", page_parent=courses_page, should_publish=False) + + data = { + "resource_link": "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/", + "start": "2020-12-09T09:31:59.417817Z", + "end": "2021-03-14T09:31:59.417895Z", + "enrollment_start": "2020-11-09T09:31:59.417936Z", + "enrollment_end": "2020-12-24T09:31:59.417972Z", + "languages": ["en", "fr"], + "enrollment_count": 46782, + "catalog_visibility": "course_and_search", + } + + authorization = ( + "SIG-HMAC-SHA256 " + "5bdfb326b35fccaef9961e03cf617c359c86ffbb6c64e0f7e074aa011e8af9d6" + ) + response = self.client.post( + "/api/v1.0/course-runs-sync", + data, + content_type="application/json", + HTTP_AUTHORIZATION=authorization, + ) + + self.assertEqual(response.status_code, 200) + self.assertEqual(response.json(), {"success": True})
MultipleObjectsReturned error during sync course from LMS to Richie

## Bug Report

**Problematic Behavior**

Error synchronizing a course from LMS to Richie after adding a course code.

**Expected behavior/code**

Richie API raises:
```
get() returned more than one Course -- it returned 2!
ERROR 2023-04-06 17:06:39,973 log 42 140255949047616 Internal Server Error: /api/v1.0/course-runs-sync/
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/site-packages/django/core/handlers/exception.py", line 47, in inner
    response = get_response(request)
  File "/usr/local/lib/python3.10/site-packages/django/core/handlers/base.py", line 181, in _get_response
    response = wrapped_callback(request, *callback_args, **callback_kwargs)
  File "/usr/local/lib/python3.10/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
    return view_func(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/django/views/generic/base.py", line 70, in view
    return self.dispatch(request, *args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/rest_framework/views.py", line 509, in dispatch
    response = self.handle_exception(exc)
  File "/usr/local/lib/python3.10/site-packages/rest_framework/views.py", line 469, in handle_exception
    self.raise_uncaught_exception(exc)
  File "/usr/local/lib/python3.10/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
    raise exc
  File "/usr/local/lib/python3.10/site-packages/rest_framework/views.py", line 506, in dispatch
    response = handler(request, *args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/rest_framework/decorators.py", line 50, in handler
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/richie/apps/courses/api.py", line 270, in sync_course_runs_from_request
    sync_course_run(request.data)
  File "/usr/local/lib/python3.10/site-packages/richie/apps/courses/api.py", line 163, in sync_course_run
    course = Course.objects.get(
  File "/usr/local/lib/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
    return getattr(self.get_queryset(), name)(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/django/db/models/query.py", line 439, in get
    raise self.model.MultipleObjectsReturned(
richie.apps.courses.models.course.Course.MultipleObjectsReturned: get() returned more than one Course -- it returned 2!
```

**Steps to Reproduce**

1. Create a new course
2. Add a code to the course
3. And then the bug happens!

**Environment**

- Richie version: `2.21.0`
- Platform: Linux

**Additional context/Screenshots**

When I run this in the Python shell (`python manage.py shell`):
```python
>>> from richie.apps.courses.models import Course
>>> course_code='FF_PP'
>>> Course.objects.get(code=course_code, extended_object__publisher_is_draft=True, extended_object__node__parent__cms_pages__course__isnull=True, )
Traceback (most recent call last):
  File "<console>", line 1, in <module>
  File "/usr/local/lib/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
    return getattr(self.get_queryset(), name)(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/django/db/models/query.py", line 439, in get
    raise self.model.MultipleObjectsReturned(
richie.apps.courses.models.course.Course.MultipleObjectsReturned: get() returned more than one Course -- it returned 2!
```

URL: https://www.nau.edu.pt/pt/curso/primeiros-passos-em-folha-de-calculo-formulas-e-funcoes-nivel-1/

Nevertheless, I found out that the synchronization is running and the number of course enrollments has been updated automatically.
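The duplication itself comes from the snapshot-exclusion filter: `extended_object__node__parent__cms_pages` traverses a to-many relation, and once the parent node carries more than one page (draft and public versions, for instance), the SQL JOIN can emit the same draft `Course` row twice. A hedged sketch of how one could confirm this in `python manage.py shell` (the models and queryset methods are real; the course code and row counts are illustrative):

```python
# Run inside `python manage.py shell` of a Richie project.
from richie.apps.courses.models import Course

qs = Course.objects.filter(
    code="FF_PP",
    extended_object__publisher_is_draft=True,
    # This reverse join across the parent node's pages is what can
    # multiply rows for a single draft course.
    extended_object__node__parent__cms_pages__course__isnull=True,
)
print(qs.count())             # e.g. 2 -- the same draft course, twice
print(qs.distinct().count())  # 1 -- .distinct() collapses the duplicates
```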
spotify__luigi-880
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nDefine the centralized register of all :class:`~luigi.task.Task` classes.\n\"\"\"\n\nimport abc\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n\nfrom luigi import six\nimport logging\nlogger = logging.getLogger('luigi-interface')\n\n\nclass TaskClassException(Exception):\n pass\n\n\nclass Register(abc.ABCMeta):\n \"\"\"\n The Metaclass of :py:class:`Task`.\n\n Acts as a global registry of Tasks with the following properties:\n\n 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the\n same object.\n 2. Keep track of all subclasses of :py:class:`Task` and expose them.\n \"\"\"\n __instance_cache = {}\n _default_namespace = None\n _reg = []\n AMBIGUOUS_CLASS = object() # Placeholder denoting an error\n \"\"\"If this value is returned by :py:meth:`__get_reg` then there is an\n ambiguous task name (two :py:class:`Task` have the same name). This denotes\n an error.\"\"\"\n\n def __new__(metacls, classname, bases, classdict):\n \"\"\"\n Custom class creation for namespacing.\n\n Also register all subclasses.\n\n Set the task namespace to whatever the currently declared namespace is.\n \"\"\"\n if \"task_namespace\" not in classdict:\n classdict[\"task_namespace\"] = metacls._default_namespace\n\n cls = super(Register, metacls).__new__(metacls, classname, bases, classdict)\n metacls._reg.append(cls)\n\n return cls\n\n def __call__(cls, *args, **kwargs):\n \"\"\"\n Custom class instantiation utilizing instance cache.\n\n If a Task has already been instantiated with the same parameters,\n the previous instance is returned to reduce number of object instances.\n \"\"\"\n def instantiate():\n return super(Register, cls).__call__(*args, **kwargs)\n\n h = cls.__instance_cache\n\n if h is None: # disabled\n return instantiate()\n\n params = cls.get_params()\n param_values = cls.get_param_values(params, args, kwargs)\n\n k = (cls, tuple(param_values))\n\n try:\n hash(k)\n except TypeError:\n logger.debug(\"Not all parameter values are hashable so instance isn't coming from the cache\")\n return instantiate() # unhashable types in parameters\n\n if k not in h:\n h[k] = instantiate()\n\n return h[k]\n\n @classmethod\n def clear_instance_cache(cls):\n \"\"\"\n Clear/Reset the instance cache.\n \"\"\"\n cls.__instance_cache = {}\n\n @classmethod\n def disable_instance_cache(cls):\n \"\"\"\n Disables the instance cache.\n \"\"\"\n cls.__instance_cache = None\n\n @property\n def task_family(cls):\n \"\"\"\n The task family for the given class.\n\n If ``cls.task_namespace is None`` then it's the name of the class.\n Otherwise, ``<task_namespace>.`` is prefixed to the class name.\n \"\"\"\n if cls.task_namespace is None:\n return cls.__name__\n else:\n return \"%s.%s\" % (cls.task_namespace, cls.__name__)\n\n @classmethod\n def __get_reg(cls):\n \"\"\"Return all of the registered classes.\n\n :return: 
an ``collections.OrderedDict`` of task_family -> class\n \"\"\"\n # We have to do this on-demand in case task names have changed later\n # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)\n reg = OrderedDict()\n for cls in cls._reg:\n if cls.run == NotImplemented:\n continue\n name = cls.task_family\n\n if name in reg and reg[name] != cls and \\\n reg[name] != cls.AMBIGUOUS_CLASS and \\\n not issubclass(cls, reg[name]):\n # Registering two different classes - this means we can't instantiate them by name\n # The only exception is if one class is a subclass of the other. In that case, we\n # instantiate the most-derived class (this fixes some issues with decorator wrappers).\n reg[name] = cls.AMBIGUOUS_CLASS\n else:\n reg[name] = cls\n\n return reg\n\n @classmethod\n def task_names(cls):\n \"\"\"\n List of task names as strings\n \"\"\"\n return sorted(cls.__get_reg().keys())\n\n @classmethod\n def tasks_str(cls):\n \"\"\"\n Human-readable register contents dump.\n \"\"\"\n return ','.join(cls.task_names())\n\n @classmethod\n def get_task_cls(cls, name):\n \"\"\"\n Returns an unambiguous class or raises an exception.\n \"\"\"\n task_cls = cls.__get_reg().get(name)\n if not task_cls:\n raise TaskClassException('Task %r not found. Candidates are: %s' % (name, cls.tasks_str()))\n\n if task_cls == cls.AMBIGUOUS_CLASS:\n raise TaskClassException('Task %r is ambiguous' % name)\n return task_cls\n\n @classmethod\n def get_all_params(cls):\n \"\"\"\n Compiles and returns all parameters for all :py:class:`Task`.\n\n :return: a generator of tuples (TODO: we should make this more elegant)\n \"\"\"\n for task_name, task_cls in six.iteritems(cls.__get_reg()):\n if task_cls == cls.AMBIGUOUS_CLASS:\n continue\n for param_name, param_obj in task_cls.get_params():\n yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj\n\n\ndef load_task(module, task_name, params_str):\n \"\"\"\n Imports task dynamically given a module and a task name.\n \"\"\"\n if module is not None:\n __import__(module)\n task_cls = Register.get_task_cls(task_name)\n return task_cls.from_str_params(params_str)\n", "path": "luigi/task_register.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nDefine the centralized register of all :class:`~luigi.task.Task` classes.\n\"\"\"\n\nimport abc\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n\nfrom luigi import six\nimport logging\nlogger = logging.getLogger('luigi-interface')\n\n\nclass TaskClassException(Exception):\n pass\n\n\nclass Register(abc.ABCMeta):\n \"\"\"\n The Metaclass of :py:class:`Task`.\n\n Acts as a global registry of Tasks with the following properties:\n\n 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the\n same object.\n 2. Keep track of all subclasses of :py:class:`Task` and expose them.\n \"\"\"\n __instance_cache = {}\n _default_namespace = None\n _reg = []\n AMBIGUOUS_CLASS = object() # Placeholder denoting an error\n \"\"\"If this value is returned by :py:meth:`__get_reg` then there is an\n ambiguous task name (two :py:class:`Task` have the same name). This denotes\n an error.\"\"\"\n\n def __new__(metacls, classname, bases, classdict):\n \"\"\"\n Custom class creation for namespacing.\n\n Also register all subclasses.\n\n Set the task namespace to whatever the currently declared namespace is.\n \"\"\"\n if \"task_namespace\" not in classdict:\n classdict[\"task_namespace\"] = metacls._default_namespace\n\n cls = super(Register, metacls).__new__(metacls, classname, bases, classdict)\n metacls._reg.append(cls)\n\n return cls\n\n def __call__(cls, *args, **kwargs):\n \"\"\"\n Custom class instantiation utilizing instance cache.\n\n If a Task has already been instantiated with the same parameters,\n the previous instance is returned to reduce number of object instances.\n \"\"\"\n def instantiate():\n return super(Register, cls).__call__(*args, **kwargs)\n\n h = cls.__instance_cache\n\n if h is None: # disabled\n return instantiate()\n\n params = cls.get_params()\n param_values = cls.get_param_values(params, args, kwargs)\n\n k = (cls, tuple(param_values))\n\n try:\n hash(k)\n except TypeError:\n logger.debug(\"Not all parameter values are hashable so instance isn't coming from the cache\")\n return instantiate() # unhashable types in parameters\n\n if k not in h:\n h[k] = instantiate()\n\n return h[k]\n\n @classmethod\n def clear_instance_cache(cls):\n \"\"\"\n Clear/Reset the instance cache.\n \"\"\"\n cls.__instance_cache = {}\n\n @classmethod\n def disable_instance_cache(cls):\n \"\"\"\n Disables the instance cache.\n \"\"\"\n cls.__instance_cache = None\n\n @property\n def task_family(cls):\n \"\"\"\n The task family for the given class.\n\n If ``cls.task_namespace is None`` then it's the name of the class.\n Otherwise, ``<task_namespace>.`` is prefixed to the class name.\n \"\"\"\n if cls.task_namespace is None:\n return cls.__name__\n else:\n return \"%s.%s\" % (cls.task_namespace, cls.__name__)\n\n @classmethod\n def __get_reg(cls):\n \"\"\"Return all of the registered classes.\n\n :return: 
an ``collections.OrderedDict`` of task_family -> class\n \"\"\"\n # We have to do this on-demand in case task names have changed later\n # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)\n reg = OrderedDict()\n for cls in cls._reg:\n name = cls.task_family\n\n if name in reg and reg[name] != cls and \\\n reg[name] != cls.AMBIGUOUS_CLASS and \\\n not issubclass(cls, reg[name]):\n # Registering two different classes - this means we can't instantiate them by name\n # The only exception is if one class is a subclass of the other. In that case, we\n # instantiate the most-derived class (this fixes some issues with decorator wrappers).\n reg[name] = cls.AMBIGUOUS_CLASS\n else:\n reg[name] = cls\n\n return reg\n\n @classmethod\n def task_names(cls):\n \"\"\"\n List of task names as strings\n \"\"\"\n return sorted(cls.__get_reg().keys())\n\n @classmethod\n def tasks_str(cls):\n \"\"\"\n Human-readable register contents dump.\n \"\"\"\n return ','.join(cls.task_names())\n\n @classmethod\n def get_task_cls(cls, name):\n \"\"\"\n Returns an unambiguous class or raises an exception.\n \"\"\"\n task_cls = cls.__get_reg().get(name)\n if not task_cls:\n raise TaskClassException('Task %r not found. Candidates are: %s' % (name, cls.tasks_str()))\n\n if task_cls == cls.AMBIGUOUS_CLASS:\n raise TaskClassException('Task %r is ambiguous' % name)\n return task_cls\n\n @classmethod\n def get_all_params(cls):\n \"\"\"\n Compiles and returns all parameters for all :py:class:`Task`.\n\n :return: a generator of tuples (TODO: we should make this more elegant)\n \"\"\"\n for task_name, task_cls in six.iteritems(cls.__get_reg()):\n if task_cls == cls.AMBIGUOUS_CLASS:\n continue\n for param_name, param_obj in task_cls.get_params():\n yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj\n\n\ndef load_task(module, task_name, params_str):\n \"\"\"\n Imports task dynamically given a module and a task name.\n \"\"\"\n if module is not None:\n __import__(module)\n task_cls = Register.get_task_cls(task_name)\n return task_cls.from_str_params(params_str)\n", "path": "luigi/task_register.py" } ]
diff --git a/luigi/task_register.py b/luigi/task_register.py index 858430243c..15d18f7007 100644 --- a/luigi/task_register.py +++ b/luigi/task_register.py @@ -135,8 +135,6 @@ def __get_reg(cls): # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822) reg = OrderedDict() for cls in cls._reg: - if cls.run == NotImplemented: - continue name = cls.task_family if name in reg and reg[name] != cls and \ diff --git a/test/cmdline_test.py b/test/cmdline_test.py index 64e56f5276..229f365c48 100644 --- a/test/cmdline_test.py +++ b/test/cmdline_test.py @@ -53,16 +53,6 @@ class AmbiguousClass(luigi.Task): pass -class NonAmbiguousClass(luigi.ExternalTask): - pass - - -class NonAmbiguousClass(luigi.Task): - - def run(self): - NonAmbiguousClass.has_run = True - - class TaskWithSameName(luigi.Task): def run(self): @@ -115,12 +105,6 @@ def test_cmdline_other_task(self, logger): def test_cmdline_ambiguous_class(self, logger): self.assertRaises(Exception, luigi.run, ['--local-scheduler', '--no-lock', 'AmbiguousClass']) - @mock.patch("logging.getLogger") - @mock.patch("warnings.warn") - def test_cmdline_non_ambiguous_class(self, warn, logger): - luigi.run(['--local-scheduler', '--no-lock', 'NonAmbiguousClass']) - self.assertTrue(NonAmbiguousClass.has_run) - @mock.patch("logging.getLogger") @mock.patch("logging.StreamHandler") def test_setup_interface_logging(self, handler, logger): diff --git a/test/task_test.py b/test/task_test.py index 3a692f7bad..4889108048 100644 --- a/test/task_test.py +++ b/test/task_test.py @@ -21,6 +21,7 @@ import luigi import luigi.task +from luigi.task_register import load_task class DummyTask(luigi.Task): @@ -55,6 +56,10 @@ def test_task_to_str_to_task(self): other = DummyTask.from_str_params(original.to_str_params()) self.assertEqual(original, other) + def test_external_tasks_loadable(self): + task = load_task("luigi", "ExternalTask", {}) + assert(isinstance(task, luigi.ExternalTask)) + def test_id_to_name_and_params(self): task_id = "InputText(date=2014-12-29)" (name, params) = luigi.task.id_to_name_and_params(task_id)
Fix external dynamic deps

Since running tasks communicate with the worker via a queue, all dynamic dependencies that they yield must be serialized and then deserialized back. This doesn't work if a task has `run = NotImplemented`, since there was a specific check for that in `Register` for an unclear reason. This PR adds a test case to reproduce the issue and fixes it by removing the check.
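For context, a minimal sketch of the pattern the removed check was breaking (task names and target paths are illustrative): a task that yields an `ExternalTask` as a dynamic dependency forces the worker to re-instantiate that dependency by name through `load_task`, and the old `run == NotImplemented` filter in `__get_reg` kept such run-less tasks out of the register.

```python
import luigi
from luigi.task_register import load_task


class UpstreamFile(luigi.ExternalTask):
    """External dependency: no run() of its own, only an output."""

    def output(self):
        return luigi.LocalTarget("/tmp/upstream.txt")


class Consumer(luigi.Task):

    def output(self):
        return luigi.LocalTarget("/tmp/consumer.txt")

    def run(self):
        # Dynamic dependency: yielded at run time, so the worker must be
        # able to serialize it and rebuild it from its task family name.
        yield UpstreamFile()
        with self.output().open("w") as handle:
            handle.write("done")


# With the check removed, run-less tasks stay registered and can be
# re-instantiated by name, mirroring the regression test in the diff:
task = load_task(None, "UpstreamFile", {})
assert isinstance(task, UpstreamFile)
```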