Dataset columns:

| column | type | length range |
| --- | --- | --- |
| in_source_id | string | 13-58 |
| issue | string | 3-241k |
| before_files | list | 0-3 items |
| after_files | list | 0-3 items |
| pr_diff | string | 109-107M |
scrapy__scrapy-1735
KeyError in robotstxt middleware I'm getting these errors in robots.txt middleware: ``` 2016-01-27 16:18:21 [scrapy.core.scraper] ERROR: Error downloading <GET http://yellowpages.co.th> Traceback (most recent call last): File "/Users/kmike/envs/scraping/lib/python2.7/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred result = f(*args, **kw) File "/Users/kmike/svn/scrapy/scrapy/downloadermiddlewares/robotstxt.py", line 65, in robot_parser if isinstance(self._parsers[netloc], Deferred): KeyError: 'yellowpages.co.th' ``` It looks like https://github.com/scrapy/scrapy/pull/1473 caused it (I can't reproduce this issue in Scrapy 1.0.4, but it is present in Scrapy master). It happens when a page fails to download and the HTTP cache is enabled. I haven't debugged it further.
[ { "content": "\"\"\"\nThis is a middleware to respect robots.txt policies. To activate it you must\nenable this middleware and enable the ROBOTSTXT_OBEY setting.\n\n\"\"\"\n\nimport logging\n\nfrom six.moves.urllib import robotparser\n\nfrom twisted.internet.defer import Deferred, maybeDeferred\nfrom scrapy.exceptions import NotConfigured, IgnoreRequest\nfrom scrapy.http import Request\nfrom scrapy.utils.httpobj import urlparse_cached\nfrom scrapy.utils.log import failure_to_exc_info\n\nlogger = logging.getLogger(__name__)\n\n\nclass RobotsTxtMiddleware(object):\n DOWNLOAD_PRIORITY = 1000\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('ROBOTSTXT_OBEY'):\n raise NotConfigured\n\n self.crawler = crawler\n self._useragent = crawler.settings.get('USER_AGENT')\n self._parsers = {}\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def process_request(self, request, spider):\n if request.meta.get('dont_obey_robotstxt'):\n return\n d = maybeDeferred(self.robot_parser, request, spider)\n d.addCallback(self.process_request_2, request, spider)\n return d\n\n def process_request_2(self, rp, request, spider):\n if rp is not None and not rp.can_fetch(self._useragent, request.url):\n logger.debug(\"Forbidden by robots.txt: %(request)s\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest()\n\n def robot_parser(self, request, spider):\n url = urlparse_cached(request)\n netloc = url.netloc\n\n if netloc not in self._parsers:\n self._parsers[netloc] = Deferred()\n robotsurl = \"%s://%s/robots.txt\" % (url.scheme, url.netloc)\n robotsreq = Request(\n robotsurl,\n priority=self.DOWNLOAD_PRIORITY,\n meta={'dont_obey_robotstxt': True}\n )\n dfd = self.crawler.engine.download(robotsreq, spider)\n dfd.addCallback(self._parse_robots, netloc)\n dfd.addErrback(self._logerror, robotsreq, spider)\n dfd.addErrback(self._robots_error, netloc)\n\n if isinstance(self._parsers[netloc], Deferred):\n d = Deferred()\n def cb(result):\n d.callback(result)\n return result\n self._parsers[netloc].addCallback(cb)\n return d\n else:\n return self._parsers[netloc]\n\n def _logerror(self, failure, request, spider):\n if failure.type is not IgnoreRequest:\n logger.error(\"Error downloading %(request)s: %(f_exception)s\",\n {'request': request, 'f_exception': failure.value},\n exc_info=failure_to_exc_info(failure),\n extra={'spider': spider})\n return failure\n\n def _parse_robots(self, response, netloc):\n rp = robotparser.RobotFileParser(response.url)\n body = ''\n if hasattr(response, 'text'):\n body = response.text\n else: # last effort try\n try:\n body = response.body.decode('utf-8')\n except UnicodeDecodeError:\n # If we found garbage, disregard it:,\n # but keep the lookup cached (in self._parsers)\n # Running rp.parse() will set rp state from\n # 'disallow all' to 'allow any'.\n pass\n rp.parse(body.splitlines())\n\n rp_dfd = self._parsers[netloc]\n self._parsers[netloc] = rp\n rp_dfd.callback(rp)\n\n def _robots_error(self, failure, netloc):\n self._parsers.pop(netloc).callback(None)\n", "path": "scrapy/downloadermiddlewares/robotstxt.py" } ]
[ { "content": "\"\"\"\nThis is a middleware to respect robots.txt policies. To activate it you must\nenable this middleware and enable the ROBOTSTXT_OBEY setting.\n\n\"\"\"\n\nimport logging\n\nfrom six.moves.urllib import robotparser\n\nfrom twisted.internet.defer import Deferred, maybeDeferred\nfrom scrapy.exceptions import NotConfigured, IgnoreRequest\nfrom scrapy.http import Request\nfrom scrapy.utils.httpobj import urlparse_cached\nfrom scrapy.utils.log import failure_to_exc_info\n\nlogger = logging.getLogger(__name__)\n\n\nclass RobotsTxtMiddleware(object):\n DOWNLOAD_PRIORITY = 1000\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('ROBOTSTXT_OBEY'):\n raise NotConfigured\n\n self.crawler = crawler\n self._useragent = crawler.settings.get('USER_AGENT')\n self._parsers = {}\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def process_request(self, request, spider):\n if request.meta.get('dont_obey_robotstxt'):\n return\n d = maybeDeferred(self.robot_parser, request, spider)\n d.addCallback(self.process_request_2, request, spider)\n return d\n\n def process_request_2(self, rp, request, spider):\n if rp is not None and not rp.can_fetch(self._useragent, request.url):\n logger.debug(\"Forbidden by robots.txt: %(request)s\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest()\n\n def robot_parser(self, request, spider):\n url = urlparse_cached(request)\n netloc = url.netloc\n\n if netloc not in self._parsers:\n self._parsers[netloc] = Deferred()\n robotsurl = \"%s://%s/robots.txt\" % (url.scheme, url.netloc)\n robotsreq = Request(\n robotsurl,\n priority=self.DOWNLOAD_PRIORITY,\n meta={'dont_obey_robotstxt': True}\n )\n dfd = self.crawler.engine.download(robotsreq, spider)\n dfd.addCallback(self._parse_robots, netloc)\n dfd.addErrback(self._logerror, robotsreq, spider)\n dfd.addErrback(self._robots_error, netloc)\n\n if isinstance(self._parsers[netloc], Deferred):\n d = Deferred()\n def cb(result):\n d.callback(result)\n return result\n self._parsers[netloc].addCallback(cb)\n return d\n else:\n return self._parsers[netloc]\n\n def _logerror(self, failure, request, spider):\n if failure.type is not IgnoreRequest:\n logger.error(\"Error downloading %(request)s: %(f_exception)s\",\n {'request': request, 'f_exception': failure.value},\n exc_info=failure_to_exc_info(failure),\n extra={'spider': spider})\n return failure\n\n def _parse_robots(self, response, netloc):\n rp = robotparser.RobotFileParser(response.url)\n body = ''\n if hasattr(response, 'body_as_unicode'):\n body = response.body_as_unicode()\n else: # last effort try\n try:\n body = response.body.decode('utf-8')\n except UnicodeDecodeError:\n # If we found garbage, disregard it:,\n # but keep the lookup cached (in self._parsers)\n # Running rp.parse() will set rp state from\n # 'disallow all' to 'allow any'.\n pass\n rp.parse(body.splitlines())\n\n rp_dfd = self._parsers[netloc]\n self._parsers[netloc] = rp\n rp_dfd.callback(rp)\n\n def _robots_error(self, failure, netloc):\n rp_dfd = self._parsers[netloc]\n self._parsers[netloc] = None\n rp_dfd.callback(None)\n", "path": "scrapy/downloadermiddlewares/robotstxt.py" } ]
diff --git a/scrapy/downloadermiddlewares/robotstxt.py b/scrapy/downloadermiddlewares/robotstxt.py index c061c240777..6fdba90cc7d 100644 --- a/scrapy/downloadermiddlewares/robotstxt.py +++ b/scrapy/downloadermiddlewares/robotstxt.py @@ -101,4 +101,6 @@ def _parse_robots(self, response, netloc): rp_dfd.callback(rp) def _robots_error(self, failure, netloc): - self._parsers.pop(netloc).callback(None) + rp_dfd = self._parsers[netloc] + self._parsers[netloc] = None + rp_dfd.callback(None) diff --git a/tests/test_downloadermiddleware_robotstxt.py b/tests/test_downloadermiddleware_robotstxt.py index 5f45dcb82a1..f2e94e1714a 100644 --- a/tests/test_downloadermiddleware_robotstxt.py +++ b/tests/test_downloadermiddleware_robotstxt.py @@ -123,6 +123,18 @@ def return_failure(request, spider): deferred.addCallback(lambda _: self.assertTrue(middleware._logerror.called)) return deferred + def test_robotstxt_immediate_error(self): + self.crawler.settings.set('ROBOTSTXT_OBEY', True) + err = error.DNSLookupError('Robotstxt address not found') + def immediate_failure(request, spider): + deferred = Deferred() + deferred.errback(failure.Failure(err)) + return deferred + self.crawler.engine.download.side_effect = immediate_failure + + middleware = RobotsTxtMiddleware(self.crawler) + return self.assertNotIgnored(Request('http://site.local'), middleware) + def test_ignore_robotstxt_request(self): self.crawler.settings.set('ROBOTSTXT_OBEY', True) def ignore_request(request, spider):
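The diff above keeps the `netloc` entry in `self._parsers` and stores `None` instead of popping the key, so the `isinstance(self._parsers[netloc], Deferred)` check in `robot_parser` can no longer hit a key that its own errback just removed. The sketch below is a reduced, self-contained model of that ordering problem, not Scrapy code: the class and names are made up, and `fail()` stands in for `engine.download()` failing synchronously (for example a cached DNS error when the HTTP cache is enabled).

```python
from twisted.internet.defer import Deferred, fail


class ParserCacheSketch:
    """Reduced model of the RobotsTxtMiddleware._parsers bookkeeping."""

    def __init__(self, keep_entry_on_error):
        self._parsers = {}
        self._keep_entry_on_error = keep_entry_on_error

    def _robots_error(self, failure, netloc):
        if self._keep_entry_on_error:
            # Patched behaviour (this PR): keep the key, store None.
            rp_dfd = self._parsers[netloc]
            self._parsers[netloc] = None
            rp_dfd.callback(None)
        else:
            # Original behaviour: the key vanishes from the cache.
            self._parsers.pop(netloc).callback(None)
        # Returning None marks the failure as handled, like the real errback.

    def robot_parser(self, netloc, download_deferred):
        if netloc not in self._parsers:
            self._parsers[netloc] = Deferred()
            # With an already-failed download this errback fires synchronously,
            # i.e. before the isinstance() check below is reached.
            download_deferred.addErrback(self._robots_error, netloc)

        # In the original code this lookup raises KeyError after pop().
        if isinstance(self._parsers[netloc], Deferred):
            return self._parsers[netloc]
        return self._parsers[netloc]


# fail() yields a Deferred whose errback has already fired, mimicking
# engine.download() failing immediately (e.g. a cached DNS error).
broken = ParserCacheSketch(keep_entry_on_error=False)
try:
    broken.robot_parser("yellowpages.co.th", fail(RuntimeError("DNS lookup failed")))
except KeyError as exc:
    print("original behaviour raises:", repr(exc))

fixed = ParserCacheSketch(keep_entry_on_error=True)
result = fixed.robot_parser("yellowpages.co.th", fail(RuntimeError("DNS lookup failed")))
print("patched behaviour returns:", result)  # None -> treated as "no parser"
```

A `None` result is already handled downstream: `process_request_2` only calls `can_fetch` when `rp is not None`, so the request proceeds unfiltered when robots.txt could not be fetched.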
apache__airflow-28730
CSRF token should expire with session ### Apache Airflow version 2.5.0 ### What happened In the default configuration, the CSRF token [expires in one hour](https://pythonhosted.org/Flask-WTF/config.html#forms-and-csrf). This setting leads to frequent errors in the UI – for no good reason. ### What you think should happen instead A short expiration time for the CSRF token is not the right value in my view and I [agree with this answer](https://security.stackexchange.com/a/56520/22108) that the CSRF token should basically never expire, instead pegging itself to the current session. That is, the CSRF token should last as long as the current session. The easiest way to accomplish this is by generating the CSRF token from the session id. ### How to reproduce _No response_ ### Operating System Linux ### Versions of Apache Airflow Providers _No response_ ### Deployment Official Apache Airflow Helm Chart ### Deployment details _No response_ ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Default configuration for the Airflow webserver.\"\"\"\nfrom __future__ import annotations\n\nimport os\n\nfrom airflow.www.fab_security.manager import AUTH_DB\n\n# from airflow.www.fab_security.manager import AUTH_LDAP\n# from airflow.www.fab_security.manager import AUTH_OAUTH\n# from airflow.www.fab_security.manager import AUTH_OID\n# from airflow.www.fab_security.manager import AUTH_REMOTE_USER\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# Flask-WTF flag for CSRF\nWTF_CSRF_ENABLED = True\n\n# ----------------------------------------------------\n# AUTHENTICATION CONFIG\n# ----------------------------------------------------\n# For details on how to set up each of the following authentication, see\n# http://flask-appbuilder.readthedocs.io/en/latest/security.html# authentication-methods\n# for details.\n\n# The authentication type\n# AUTH_OID : Is for OpenID\n# AUTH_DB : Is for database\n# AUTH_LDAP : Is for LDAP\n# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server\n# AUTH_OAUTH : Is for OAuth\nAUTH_TYPE = AUTH_DB\n\n# Uncomment to setup Full admin role name\n# AUTH_ROLE_ADMIN = 'Admin'\n\n# Uncomment and set to desired role to enable access without authentication\n# AUTH_ROLE_PUBLIC = 'Viewer'\n\n# Will allow user self registration\n# AUTH_USER_REGISTRATION = True\n\n# The recaptcha it's automatically enabled for user self registration is active and the keys are necessary\n# RECAPTCHA_PRIVATE_KEY = PRIVATE_KEY\n# RECAPTCHA_PUBLIC_KEY = PUBLIC_KEY\n\n# Config for Flask-Mail necessary for user self registration\n# MAIL_SERVER = 'smtp.gmail.com'\n# MAIL_USE_TLS = True\n# MAIL_USERNAME = '[email protected]'\n# MAIL_PASSWORD = 'passwordformail'\n# MAIL_DEFAULT_SENDER = '[email protected]'\n\n# The default user self registration role\n# AUTH_USER_REGISTRATION_ROLE = \"Public\"\n\n# When using OAuth Auth, uncomment to setup provider(s) info\n# Google OAuth example:\n# OAUTH_PROVIDERS = [{\n# 'name':'google',\n# 'token_key':'access_token',\n# 'icon':'fa-google',\n# 'remote_app': {\n# 'api_base_url':'https://www.googleapis.com/oauth2/v2/',\n# 'client_kwargs':{\n# 'scope': 'email profile'\n# },\n# 'access_token_url':'https://accounts.google.com/o/oauth2/token',\n# 'authorize_url':'https://accounts.google.com/o/oauth2/auth',\n# 'request_token_url': None,\n# 'client_id': GOOGLE_KEY,\n# 'client_secret': GOOGLE_SECRET_KEY,\n# }\n# }]\n\n# When using LDAP Auth, setup the ldap server\n# AUTH_LDAP_SERVER = \"ldap://ldapserver.new\"\n\n# When using OpenID Auth, uncomment to setup OpenID providers.\n# example for OpenID authentication\n# OPENID_PROVIDERS = [\n# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },\n# { 'name': 'AOL', 'url': 
'http://openid.aol.com/<username>' },\n# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },\n# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]\n\n# ----------------------------------------------------\n# Theme CONFIG\n# ----------------------------------------------------\n# Flask App Builder comes up with a number of predefined themes\n# that you can use for Apache Airflow.\n# http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes\n# Please make sure to remove \"navbar_color\" configuration from airflow.cfg\n# in order to fully utilize the theme. (or use that property in conjunction with theme)\n# APP_THEME = \"bootstrap-theme.css\" # default bootstrap\n# APP_THEME = \"amelia.css\"\n# APP_THEME = \"cerulean.css\"\n# APP_THEME = \"cosmo.css\"\n# APP_THEME = \"cyborg.css\"\n# APP_THEME = \"darkly.css\"\n# APP_THEME = \"flatly.css\"\n# APP_THEME = \"journal.css\"\n# APP_THEME = \"lumen.css\"\n# APP_THEME = \"paper.css\"\n# APP_THEME = \"readable.css\"\n# APP_THEME = \"sandstone.css\"\n# APP_THEME = \"simplex.css\"\n# APP_THEME = \"slate.css\"\n# APP_THEME = \"solar.css\"\n# APP_THEME = \"spacelab.css\"\n# APP_THEME = \"superhero.css\"\n# APP_THEME = \"united.css\"\n# APP_THEME = \"yeti.css\"\n", "path": "airflow/config_templates/default_webserver_config.py" } ]
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Default configuration for the Airflow webserver.\"\"\"\nfrom __future__ import annotations\n\nimport os\n\nfrom airflow.www.fab_security.manager import AUTH_DB\n\n# from airflow.www.fab_security.manager import AUTH_LDAP\n# from airflow.www.fab_security.manager import AUTH_OAUTH\n# from airflow.www.fab_security.manager import AUTH_OID\n# from airflow.www.fab_security.manager import AUTH_REMOTE_USER\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# Flask-WTF flag for CSRF\nWTF_CSRF_ENABLED = True\nWTF_CSRF_TIME_LIMIT = None\n\n# ----------------------------------------------------\n# AUTHENTICATION CONFIG\n# ----------------------------------------------------\n# For details on how to set up each of the following authentication, see\n# http://flask-appbuilder.readthedocs.io/en/latest/security.html# authentication-methods\n# for details.\n\n# The authentication type\n# AUTH_OID : Is for OpenID\n# AUTH_DB : Is for database\n# AUTH_LDAP : Is for LDAP\n# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server\n# AUTH_OAUTH : Is for OAuth\nAUTH_TYPE = AUTH_DB\n\n# Uncomment to setup Full admin role name\n# AUTH_ROLE_ADMIN = 'Admin'\n\n# Uncomment and set to desired role to enable access without authentication\n# AUTH_ROLE_PUBLIC = 'Viewer'\n\n# Will allow user self registration\n# AUTH_USER_REGISTRATION = True\n\n# The recaptcha it's automatically enabled for user self registration is active and the keys are necessary\n# RECAPTCHA_PRIVATE_KEY = PRIVATE_KEY\n# RECAPTCHA_PUBLIC_KEY = PUBLIC_KEY\n\n# Config for Flask-Mail necessary for user self registration\n# MAIL_SERVER = 'smtp.gmail.com'\n# MAIL_USE_TLS = True\n# MAIL_USERNAME = '[email protected]'\n# MAIL_PASSWORD = 'passwordformail'\n# MAIL_DEFAULT_SENDER = '[email protected]'\n\n# The default user self registration role\n# AUTH_USER_REGISTRATION_ROLE = \"Public\"\n\n# When using OAuth Auth, uncomment to setup provider(s) info\n# Google OAuth example:\n# OAUTH_PROVIDERS = [{\n# 'name':'google',\n# 'token_key':'access_token',\n# 'icon':'fa-google',\n# 'remote_app': {\n# 'api_base_url':'https://www.googleapis.com/oauth2/v2/',\n# 'client_kwargs':{\n# 'scope': 'email profile'\n# },\n# 'access_token_url':'https://accounts.google.com/o/oauth2/token',\n# 'authorize_url':'https://accounts.google.com/o/oauth2/auth',\n# 'request_token_url': None,\n# 'client_id': GOOGLE_KEY,\n# 'client_secret': GOOGLE_SECRET_KEY,\n# }\n# }]\n\n# When using LDAP Auth, setup the ldap server\n# AUTH_LDAP_SERVER = \"ldap://ldapserver.new\"\n\n# When using OpenID Auth, uncomment to setup OpenID providers.\n# example for OpenID authentication\n# OPENID_PROVIDERS = [\n# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },\n# { 'name': 'AOL', 'url': 
'http://openid.aol.com/<username>' },\n# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },\n# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]\n\n# ----------------------------------------------------\n# Theme CONFIG\n# ----------------------------------------------------\n# Flask App Builder comes up with a number of predefined themes\n# that you can use for Apache Airflow.\n# http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes\n# Please make sure to remove \"navbar_color\" configuration from airflow.cfg\n# in order to fully utilize the theme. (or use that property in conjunction with theme)\n# APP_THEME = \"bootstrap-theme.css\" # default bootstrap\n# APP_THEME = \"amelia.css\"\n# APP_THEME = \"cerulean.css\"\n# APP_THEME = \"cosmo.css\"\n# APP_THEME = \"cyborg.css\"\n# APP_THEME = \"darkly.css\"\n# APP_THEME = \"flatly.css\"\n# APP_THEME = \"journal.css\"\n# APP_THEME = \"lumen.css\"\n# APP_THEME = \"paper.css\"\n# APP_THEME = \"readable.css\"\n# APP_THEME = \"sandstone.css\"\n# APP_THEME = \"simplex.css\"\n# APP_THEME = \"slate.css\"\n# APP_THEME = \"solar.css\"\n# APP_THEME = \"spacelab.css\"\n# APP_THEME = \"superhero.css\"\n# APP_THEME = \"united.css\"\n# APP_THEME = \"yeti.css\"\n", "path": "airflow/config_templates/default_webserver_config.py" } ]
diff --git a/airflow/config_templates/default_webserver_config.py b/airflow/config_templates/default_webserver_config.py index ac999a0deafb6..aa22b125fa98c 100644 --- a/airflow/config_templates/default_webserver_config.py +++ b/airflow/config_templates/default_webserver_config.py @@ -32,6 +32,7 @@ # Flask-WTF flag for CSRF WTF_CSRF_ENABLED = True +WTF_CSRF_TIME_LIMIT = None # ---------------------------------------------------- # AUTHENTICATION CONFIG
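The diff above only touches the shipped `default_webserver_config.py` template, so an existing deployment that already has its own generated `webserver_config.py` would typically apply the same setting there. This is a sketch based on standard Flask-WTF semantics, where `WTF_CSRF_TIME_LIMIT = None` makes the token valid for the life of the session instead of the default 3600 seconds:

```python
# webserver_config.py (the copy used by your deployment, not the shipped template)
# Keep CSRF protection enabled, but tie the token lifetime to the session instead
# of the one-hour Flask-WTF default that caused the frequent UI errors.
WTF_CSRF_ENABLED = True
WTF_CSRF_TIME_LIMIT = None
```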
wagtail__wagtail-9905
Rich text toolbar is hidden by default making some edits difficult ### Issue Summary Prior to Wagtail v4, the rich text toolbar was always visible which made it easy to access the full range of options when editing. Now that the toolbar is hidden by default, it's very difficult. I understand the desire to save screen real estate but it's now nearly impossible for normal editors to discover how to make the same edits they could prior to v4. The simplest example is adding a new `RichTextBlock` and wanting to kick things off with a numbered list. Previously this was just a toolbar button click away. Now one needs to unlearn this and begin to think of lists as a "block" that can be triggered per the "Write something or type '/' to insert a block" prompt. For a slightly more advanced but documented example, the [Extending the Draftail Editor](https://docs.wagtail.org/en/v4.0.2/extending/extending_draftail.html#creating-new-entities) documentation gives an example of how to add a stock chooser. One has to look closely at the animated GIF to figure out that the only way to even access the toolbar in this scenario is to highlight some empty text: ![draftail_entity_stock_source](https://user-images.githubusercontent.com/24574/193920852-94016da0-1b2d-47ce-90fe-e949d38f75b2.gif) The only more slightly discoverable way to trigger the toolbar is to highlight text, but of course that will result in the inline content _replacing_ the highlighted text. In short, it seems the assumption behind the hidden-by-default toolbar is that the only buttons available in the toolbar are for formatting selected text. This issue impacts plugins like [wagtail-footnotes](https://github.com/torchbox/wagtail-footnotes) which expect to be able to insert a footnote inline, anywhere within a sentence. If editors want to insert a footnote mid-sentence, they now have to highlight a space, insert the footnote and then add an additional space so there is still a single space between the footnote link and the following word. (wagtail-footnotes is not fully Wagtail v4 compatible yet but there is work in progress and I noticed this issue when testing) Ideally the toolbar would always display when the input field is in focus. This keeps the rest of the page that is not being actively edited to a minimum while making it extremely easy to access the full set of rich text "features" on the chunk of text you're actively editing. It doesn't really work to add a new line to get the "type '/'" prompt since this would just place the e.g. stock chooser icon or footnote reference on a new line and would have to be manually maneuvered into place.
[ { "content": "from django.conf import settings\nfrom django.contrib.auth.models import Permission\nfrom django.urls import reverse\nfrom django.utils.http import urlencode\nfrom django.utils.translation import gettext\nfrom django.utils.translation import gettext_lazy as _\nfrom draftjs_exporter.dom import DOM\n\nimport wagtail.admin.rich_text.editors.draftail.features as draftail_features\nfrom wagtail import __version__, hooks\nfrom wagtail.admin.admin_url_finder import (\n ModelAdminURLFinder,\n register_admin_url_finder,\n)\nfrom wagtail.admin.auth import user_has_any_page_permission\nfrom wagtail.admin.forms.collections import GroupCollectionManagementPermissionFormSet\nfrom wagtail.admin.menu import (\n DismissibleMenuItem,\n DismissibleSubmenuMenuItem,\n MenuItem,\n SubmenuMenuItem,\n help_menu,\n reports_menu,\n settings_menu,\n)\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.admin.rich_text.converters.contentstate import link_entity\nfrom wagtail.admin.rich_text.converters.editor_html import (\n LinkTypeRule,\n PageLinkHandler,\n WhitelistRule,\n)\nfrom wagtail.admin.rich_text.converters.html_to_contentstate import (\n BlockElementHandler,\n ExternalLinkElementHandler,\n HorizontalRuleHandler,\n InlineStyleElementHandler,\n ListElementHandler,\n ListItemElementHandler,\n PageLinkElementHandler,\n)\nfrom wagtail.admin.search import SearchArea\nfrom wagtail.admin.site_summary import PagesSummaryItem\nfrom wagtail.admin.ui.sidebar import (\n PageExplorerMenuItem as PageExplorerMenuItemComponent,\n)\nfrom wagtail.admin.ui.sidebar import SubMenuItem as SubMenuItemComponent\nfrom wagtail.admin.views.pages.bulk_actions import (\n DeleteBulkAction,\n MoveBulkAction,\n PublishBulkAction,\n UnpublishBulkAction,\n)\nfrom wagtail.admin.viewsets import viewsets\nfrom wagtail.admin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.models import Collection, Page, Task, UserPagePermissionsProxy, Workflow\nfrom wagtail.permissions import (\n collection_permission_policy,\n task_permission_policy,\n workflow_permission_policy,\n)\nfrom wagtail.templatetags.wagtailcore_tags import (\n wagtail_feature_release_editor_guide_link,\n wagtail_feature_release_whats_new_link,\n)\nfrom wagtail.whitelist import allow_without_attributes, attribute_rule, check_url\n\n\nclass ExplorerMenuItem(MenuItem):\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n def get_context(self, request):\n context = super().get_context(request)\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n context[\"start_page_id\"] = start_page.id\n\n return context\n\n def render_component(self, request):\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n return PageExplorerMenuItemComponent(\n self.name,\n self.label,\n self.url,\n start_page.id,\n icon_name=self.icon_name,\n classnames=self.classnames,\n )\n else:\n return super().render_component(request)\n\n\[email protected](\"register_admin_menu_item\")\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _(\"Pages\"),\n reverse(\"wagtailadmin_explore_root\"),\n name=\"explorer\",\n icon_name=\"folder-open-inverse\",\n order=100,\n )\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n def render_component(self, request):\n return SubMenuItemComponent(\n self.name,\n self.label,\n self.menu.render_component(request),\n icon_name=self.icon_name,\n classnames=self.classnames,\n footer_text=\"Wagtail v\" + __version__,\n )\n\n\[email 
protected](\"register_admin_menu_item\")\ndef register_settings_menu():\n return SettingsMenuItem(_(\"Settings\"), settings_menu, icon_name=\"cogs\", order=10000)\n\n\[email protected](\"register_permissions\")\ndef register_permissions():\n return Permission.objects.filter(\n content_type__app_label=\"wagtailadmin\", codename=\"access_admin\"\n )\n\n\nclass PageSearchArea(SearchArea):\n def __init__(self):\n super().__init__(\n _(\"Pages\"),\n reverse(\"wagtailadmin_pages:search\"),\n name=\"pages\",\n icon_name=\"folder-open-inverse\",\n order=100,\n )\n\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n\[email protected](\"register_admin_search_area\")\ndef register_pages_search_area():\n return PageSearchArea()\n\n\[email protected](\"register_group_permission_panel\")\ndef register_collection_permissions_panel():\n return GroupCollectionManagementPermissionFormSet\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_collections_menu_item():\n return CollectionsMenuItem(\n _(\"Collections\"),\n reverse(\"wagtailadmin_collections:index\"),\n icon_name=\"folder-open-1\",\n order=700,\n )\n\n\nclass WorkflowsMenuItem(MenuItem):\n def is_shown(self, request):\n if not getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n return False\n\n return workflow_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\nclass WorkflowTasksMenuItem(MenuItem):\n def is_shown(self, request):\n if not getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n return False\n\n return task_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_workflows_menu_item():\n return WorkflowsMenuItem(\n _(\"Workflows\"),\n reverse(\"wagtailadmin_workflows:index\"),\n icon_name=\"tasks\",\n order=100,\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_workflow_tasks_menu_item():\n return WorkflowTasksMenuItem(\n _(\"Workflow tasks\"),\n reverse(\"wagtailadmin_workflows:task_index\"),\n icon_name=\"thumbtack\",\n order=150,\n )\n\n\[email protected](\"register_page_listing_buttons\")\ndef page_listing_buttons(page, page_perms, next_url=None):\n if page_perms.can_edit():\n yield PageListingButton(\n _(\"Edit\"),\n reverse(\"wagtailadmin_pages:edit\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Edit '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page.has_unpublished_changes and page.is_previewable():\n yield PageListingButton(\n _(\"View draft\"),\n reverse(\"wagtailadmin_pages:view_draft\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Preview draft version of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n \"rel\": \"noreferrer\",\n },\n priority=20,\n )\n if page.live and page.url:\n yield PageListingButton(\n _(\"View live\"),\n page.url,\n attrs={\n \"rel\": \"noreferrer\",\n \"aria-label\": _(\"View live version of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=30,\n )\n if page_perms.can_add_subpage():\n yield PageListingButton(\n _(\"Add child page\"),\n reverse(\"wagtailadmin_pages:add_subpage\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Add a child page to '%(title)s' \")\n % 
{\"title\": page.get_admin_display_title()}\n },\n priority=40,\n )\n\n yield ButtonWithDropdownFromHook(\n _(\"More\"),\n hook_name=\"register_page_listing_more_buttons\",\n page=page,\n page_perms=page_perms,\n next_url=next_url,\n attrs={\n \"target\": \"_blank\",\n \"rel\": \"noreferrer\",\n \"title\": _(\"View more options for '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=50,\n )\n\n\[email protected](\"register_page_listing_more_buttons\")\ndef page_listing_more_buttons(page, page_perms, next_url=None):\n if page_perms.can_move():\n yield Button(\n _(\"Move\"),\n reverse(\"wagtailadmin_pages:move\", args=[page.id]),\n attrs={\n \"title\": _(\"Move page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page_perms.can_copy():\n url = reverse(\"wagtailadmin_pages:copy\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Copy\"),\n url,\n attrs={\n \"title\": _(\"Copy page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=20,\n )\n if page_perms.can_delete():\n url = reverse(\"wagtailadmin_pages:delete\", args=[page.id])\n include_next_url = True\n\n # After deleting the page, it is impossible to redirect to it.\n if next_url == reverse(\"wagtailadmin_explore\", args=[page.id]):\n include_next_url = False\n\n if next_url and include_next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Delete\"),\n url,\n attrs={\n \"title\": _(\"Delete page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=30,\n )\n if page_perms.can_unpublish():\n url = reverse(\"wagtailadmin_pages:unpublish\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Unpublish\"),\n url,\n attrs={\n \"title\": _(\"Unpublish page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=40,\n )\n if page_perms.can_view_revisions():\n yield Button(\n _(\"History\"),\n reverse(\"wagtailadmin_pages:history\", args=[page.id]),\n attrs={\n \"title\": _(\"View page history for '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=50,\n )\n\n if page_perms.can_reorder_children():\n yield Button(\n _(\"Sort menu order\"),\n \"?ordering=ord\",\n attrs={\n \"title\": _(\"Change ordering of child pages of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=60,\n )\n\n\[email protected](\"register_page_header_buttons\")\ndef page_header_buttons(page, page_perms, next_url=None):\n if page_perms.can_edit():\n yield Button(\n _(\"Edit\"),\n reverse(\"wagtailadmin_pages:edit\", args=[page.id]),\n icon_name=\"edit\",\n attrs={\n \"title\": _(\"Edit '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page_perms.can_move():\n yield Button(\n _(\"Move\"),\n reverse(\"wagtailadmin_pages:move\", args=[page.id]),\n icon_name=\"arrow-right-full\",\n attrs={\n \"title\": _(\"Move page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=20,\n )\n if page_perms.can_copy():\n url = reverse(\"wagtailadmin_pages:copy\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Copy\"),\n url,\n icon_name=\"copy\",\n attrs={\n \"title\": _(\"Copy page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=30,\n )\n if page_perms.can_add_subpage():\n yield Button(\n _(\"Add child 
page\"),\n reverse(\"wagtailadmin_pages:add_subpage\", args=[page.id]),\n icon_name=\"circle-plus\",\n attrs={\n \"aria-label\": _(\"Add a child page to '%(title)s' \")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=40,\n )\n if page_perms.can_delete():\n url = reverse(\"wagtailadmin_pages:delete\", args=[page.id])\n\n include_next_url = True\n\n # After deleting the page, it is impossible to redirect to it.\n if next_url == reverse(\"wagtailadmin_explore\", args=[page.id]):\n include_next_url = False\n\n if next_url == reverse(\"wagtailadmin_pages:edit\", args=[page.id]):\n include_next_url = False\n\n if next_url and include_next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Delete\"),\n url,\n icon_name=\"bin\",\n attrs={\n \"title\": _(\"Delete page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=50,\n )\n if page_perms.can_unpublish():\n url = reverse(\"wagtailadmin_pages:unpublish\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Unpublish\"),\n url,\n icon_name=\"download-alt\",\n attrs={\n \"title\": _(\"Unpublish page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=60,\n )\n if page_perms.can_reorder_children():\n url = reverse(\"wagtailadmin_explore\", args=[page.id])\n url += \"?ordering=ord\"\n yield Button(\n _(\"Sort menu order\"),\n url,\n icon_name=\"list-ul\",\n attrs={\n \"title\": _(\"Change ordering of child pages of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=70,\n )\n\n\[email protected](\"register_admin_urls\")\ndef register_viewsets_urls():\n viewsets.populate()\n return viewsets.get_urlpatterns()\n\n\[email protected](\"register_rich_text_features\")\ndef register_core_features(features):\n features.register_converter_rule(\n \"editorhtml\",\n \"link\",\n [\n WhitelistRule(\"a\", attribute_rule({\"href\": check_url})),\n LinkTypeRule(\"page\", PageLinkHandler),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"bold\",\n [\n WhitelistRule(\"b\", allow_without_attributes),\n WhitelistRule(\"strong\", allow_without_attributes),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"italic\",\n [\n WhitelistRule(\"i\", allow_without_attributes),\n WhitelistRule(\"em\", allow_without_attributes),\n ],\n )\n\n headings_elements = [\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"]\n for order, element in enumerate(headings_elements):\n features.register_converter_rule(\n \"editorhtml\", element, [WhitelistRule(element, allow_without_attributes)]\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"ol\",\n [\n WhitelistRule(\"ol\", allow_without_attributes),\n WhitelistRule(\"li\", allow_without_attributes),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"ul\",\n [\n WhitelistRule(\"ul\", allow_without_attributes),\n WhitelistRule(\"li\", allow_without_attributes),\n ],\n )\n\n # Draftail\n features.register_editor_plugin(\n \"draftail\", \"hr\", draftail_features.BooleanFeature(\"enableHorizontalRule\")\n )\n features.register_converter_rule(\n \"contentstate\",\n \"hr\",\n {\n \"from_database_format\": {\n \"hr\": HorizontalRuleHandler(),\n },\n \"to_database_format\": {\n \"entity_decorators\": {\n \"HORIZONTAL_RULE\": lambda props: DOM.create_element(\"hr\")\n }\n },\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"h1\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h1\",\n \"type\": 
\"header-one\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 1},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h1\",\n {\n \"from_database_format\": {\n \"h1\": BlockElementHandler(\"header-one\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-one\": \"h1\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h2\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h2\",\n \"type\": \"header-two\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 2},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h2\",\n {\n \"from_database_format\": {\n \"h2\": BlockElementHandler(\"header-two\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-two\": \"h2\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h3\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h3\",\n \"type\": \"header-three\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 3},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h3\",\n {\n \"from_database_format\": {\n \"h3\": BlockElementHandler(\"header-three\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-three\": \"h3\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h4\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h4\",\n \"type\": \"header-four\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 4},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h4\",\n {\n \"from_database_format\": {\n \"h4\": BlockElementHandler(\"header-four\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-four\": \"h4\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h5\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h5\",\n \"type\": \"header-five\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 5},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h5\",\n {\n \"from_database_format\": {\n \"h5\": BlockElementHandler(\"header-five\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-five\": \"h5\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h6\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h6\",\n \"type\": \"header-six\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 6},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h6\",\n {\n \"from_database_format\": {\n \"h6\": BlockElementHandler(\"header-six\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-six\": \"h6\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"ul\",\n draftail_features.BlockFeature(\n {\n \"type\": \"unordered-list-item\",\n \"icon\": \"list-ul\",\n \"description\": gettext(\"Bulleted list\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"ul\",\n {\n \"from_database_format\": {\n \"ul\": ListElementHandler(\"unordered-list-item\"),\n \"li\": ListItemElementHandler(),\n },\n \"to_database_format\": {\n \"block_map\": {\"unordered-list-item\": {\"element\": \"li\", \"wrapper\": \"ul\"}}\n },\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"ol\",\n draftail_features.BlockFeature(\n {\n \"type\": \"ordered-list-item\",\n \"icon\": \"list-ol\",\n \"description\": gettext(\"Numbered list\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"ol\",\n {\n \"from_database_format\": {\n \"ol\": ListElementHandler(\"ordered-list-item\"),\n 
\"li\": ListItemElementHandler(),\n },\n \"to_database_format\": {\n \"block_map\": {\"ordered-list-item\": {\"element\": \"li\", \"wrapper\": \"ol\"}}\n },\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"blockquote\",\n draftail_features.BlockFeature(\n {\n \"type\": \"blockquote\",\n \"icon\": \"openquote\",\n \"description\": gettext(\"Blockquote\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"blockquote\",\n {\n \"from_database_format\": {\n \"blockquote\": BlockElementHandler(\"blockquote\"),\n },\n \"to_database_format\": {\"block_map\": {\"blockquote\": \"blockquote\"}},\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"bold\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"BOLD\",\n \"icon\": \"bold\",\n \"description\": gettext(\"Bold\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"bold\",\n {\n \"from_database_format\": {\n \"b\": InlineStyleElementHandler(\"BOLD\"),\n \"strong\": InlineStyleElementHandler(\"BOLD\"),\n },\n \"to_database_format\": {\"style_map\": {\"BOLD\": \"b\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"italic\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"ITALIC\",\n \"icon\": \"italic\",\n \"description\": gettext(\"Italic\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"italic\",\n {\n \"from_database_format\": {\n \"i\": InlineStyleElementHandler(\"ITALIC\"),\n \"em\": InlineStyleElementHandler(\"ITALIC\"),\n },\n \"to_database_format\": {\"style_map\": {\"ITALIC\": \"i\"}},\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"link\",\n draftail_features.EntityFeature(\n {\n \"type\": \"LINK\",\n \"icon\": \"link\",\n \"description\": gettext(\"Link\"),\n # We want to enforce constraints on which links can be pasted into rich text.\n # Keep only the attributes Wagtail needs.\n \"attributes\": [\"url\", \"id\", \"parentId\"],\n \"allowlist\": {\n # Keep pasted links with http/https protocol, and not-pasted links (href = undefined).\n \"href\": \"^(http:|https:|undefined$)\",\n },\n },\n js=[\n \"wagtailadmin/js/page-chooser-modal.js\",\n ],\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"link\",\n {\n \"from_database_format\": {\n \"a[href]\": ExternalLinkElementHandler(\"LINK\"),\n 'a[linktype=\"page\"]': PageLinkElementHandler(\"LINK\"),\n },\n \"to_database_format\": {\"entity_decorators\": {\"LINK\": link_entity}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"superscript\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"SUPERSCRIPT\",\n \"icon\": \"superscript\",\n \"description\": gettext(\"Superscript\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"superscript\",\n {\n \"from_database_format\": {\n \"sup\": InlineStyleElementHandler(\"SUPERSCRIPT\"),\n },\n \"to_database_format\": {\"style_map\": {\"SUPERSCRIPT\": \"sup\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"subscript\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"SUBSCRIPT\",\n \"icon\": \"subscript\",\n \"description\": gettext(\"Subscript\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"subscript\",\n {\n \"from_database_format\": {\n \"sub\": InlineStyleElementHandler(\"SUBSCRIPT\"),\n },\n \"to_database_format\": {\"style_map\": {\"SUBSCRIPT\": \"sub\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"strikethrough\",\n 
draftail_features.InlineStyleFeature(\n {\n \"type\": \"STRIKETHROUGH\",\n \"icon\": \"strikethrough\",\n \"description\": gettext(\"Strikethrough\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"strikethrough\",\n {\n \"from_database_format\": {\n \"s\": InlineStyleElementHandler(\"STRIKETHROUGH\"),\n },\n \"to_database_format\": {\"style_map\": {\"STRIKETHROUGH\": \"s\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"code\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"CODE\",\n \"icon\": \"code\",\n \"description\": gettext(\"Code\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"code\",\n {\n \"from_database_format\": {\n \"code\": InlineStyleElementHandler(\"CODE\"),\n },\n \"to_database_format\": {\"style_map\": {\"CODE\": \"code\"}},\n },\n )\n\n\nclass LockedPagesMenuItem(MenuItem):\n def is_shown(self, request):\n return UserPagePermissionsProxy(request.user).can_remove_locks()\n\n\nclass WorkflowReportMenuItem(MenuItem):\n def is_shown(self, request):\n return getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True)\n\n\nclass SiteHistoryReportMenuItem(MenuItem):\n def is_shown(self, request):\n return UserPagePermissionsProxy(request.user).explorable_pages().exists()\n\n\nclass AgingPagesReportMenuItem(MenuItem):\n def is_shown(self, request):\n return getattr(settings, \"WAGTAIL_AGING_PAGES_ENABLED\", True)\n\n\[email protected](\"register_reports_menu_item\")\ndef register_locked_pages_menu_item():\n return LockedPagesMenuItem(\n _(\"Locked pages\"),\n reverse(\"wagtailadmin_reports:locked_pages\"),\n icon_name=\"lock\",\n order=700,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_workflow_report_menu_item():\n return WorkflowReportMenuItem(\n _(\"Workflows\"),\n reverse(\"wagtailadmin_reports:workflow\"),\n icon_name=\"tasks\",\n order=800,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_workflow_tasks_report_menu_item():\n return WorkflowReportMenuItem(\n _(\"Workflow tasks\"),\n reverse(\"wagtailadmin_reports:workflow_tasks\"),\n icon_name=\"thumbtack\",\n order=900,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_site_history_report_menu_item():\n return SiteHistoryReportMenuItem(\n _(\"Site history\"),\n reverse(\"wagtailadmin_reports:site_history\"),\n icon_name=\"history\",\n order=1000,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_aging_pages_report_menu_item():\n return AgingPagesReportMenuItem(\n _(\"Aging pages\"),\n reverse(\"wagtailadmin_reports:aging_pages\"),\n icon_name=\"time\",\n order=1100,\n )\n\n\[email protected](\"register_admin_menu_item\")\ndef register_reports_menu():\n return SubmenuMenuItem(_(\"Reports\"), reports_menu, icon_name=\"site\", order=9000)\n\n\[email protected](\"register_help_menu_item\")\ndef register_whats_new_in_wagtail_version_menu_item():\n version = \"4.1\"\n return DismissibleMenuItem(\n _(\"What's new in Wagtail %(version)s\") % {\"version\": version},\n wagtail_feature_release_whats_new_link(),\n icon_name=\"help\",\n order=1000,\n attrs={\"target\": \"_blank\", \"rel\": \"noreferrer\"},\n name=f\"whats-new-in-wagtail-{version}\",\n )\n\n\[email protected](\"register_help_menu_item\")\ndef register_editors_guide_menu_item():\n return DismissibleMenuItem(\n _(\"Editor Guide\"),\n wagtail_feature_release_editor_guide_link(),\n icon_name=\"help\",\n order=1100,\n attrs={\"target\": \"_blank\", \"rel\": \"noreferrer\"},\n name=\"editor-guide\",\n 
)\n\n\[email protected](\"register_admin_menu_item\")\ndef register_help_menu():\n return DismissibleSubmenuMenuItem(\n _(\"Help\"),\n help_menu,\n icon_name=\"help\",\n order=11000,\n name=\"help\",\n )\n\n\[email protected](\"register_icons\")\ndef register_icons(icons):\n for icon in [\n \"angle-double-left.svg\",\n \"angle-double-right.svg\",\n \"arrow-down-big.svg\",\n \"arrow-down.svg\",\n \"arrow-right-full.svg\",\n \"arrow-left.svg\",\n \"arrow-right.svg\",\n \"arrow-up-big.svg\",\n \"arrow-up.svg\",\n \"arrows-up-down.svg\",\n \"bars.svg\",\n \"bin.svg\",\n \"bold.svg\",\n \"breadcrumb-expand.svg\",\n \"calendar.svg\",\n \"calendar-alt.svg\",\n \"calendar-check.svg\",\n \"chain-broken.svg\",\n \"check.svg\",\n \"chevron-down.svg\",\n \"circle-check.svg\",\n \"circle-plus.svg\",\n \"circle-xmark.svg\",\n \"clipboard-list.svg\",\n \"code.svg\",\n \"cog.svg\",\n \"cogs.svg\",\n \"copy.svg\",\n \"collapse-down.svg\",\n \"collapse-up.svg\",\n \"comment.svg\",\n \"comment-add.svg\",\n \"comment-add-reversed.svg\",\n \"cross.svg\",\n \"cut.svg\",\n \"date.svg\",\n \"desktop.svg\",\n \"doc-empty-inverse.svg\",\n \"doc-empty.svg\",\n \"doc-full-inverse.svg\",\n \"doc-full.svg\", # aka file-text-alt\n \"dots-vertical.svg\",\n \"dots-horizontal.svg\",\n \"download-alt.svg\",\n \"download.svg\",\n \"draft.svg\",\n \"duplicate.svg\",\n \"edit.svg\",\n \"ellipsis-v.svg\",\n \"expand-right.svg\",\n \"error.svg\",\n \"folder-inverse.svg\",\n \"folder-open-1.svg\",\n \"folder-open-inverse.svg\",\n \"folder.svg\",\n \"form.svg\",\n \"globe.svg\",\n \"grip.svg\",\n \"group.svg\",\n \"h1.svg\",\n \"h2.svg\",\n \"h3.svg\",\n \"h4.svg\",\n \"h5.svg\",\n \"h6.svg\",\n \"help.svg\",\n \"history.svg\",\n \"home.svg\",\n \"horizontalrule.svg\",\n \"image.svg\", # aka picture\n \"info-circle.svg\",\n \"italic.svg\",\n \"link.svg\",\n \"link-external.svg\",\n \"list-ol.svg\",\n \"list-ul.svg\",\n \"lock-open.svg\",\n \"lock.svg\",\n \"login.svg\",\n \"logout.svg\",\n \"mail.svg\",\n \"media.svg\",\n \"minus.svg\",\n \"mobile-alt.svg\",\n \"no-view.svg\",\n \"openquote.svg\",\n \"order-down.svg\",\n \"order-up.svg\",\n \"order.svg\",\n \"password.svg\",\n \"pick.svg\",\n \"pilcrow.svg\",\n \"placeholder.svg\", # aka marquee\n \"plus-inverse.svg\",\n \"plus.svg\",\n \"radio-empty.svg\",\n \"radio-full.svg\",\n \"redirect.svg\",\n \"repeat.svg\",\n \"reset.svg\",\n \"resubmit.svg\",\n \"rotate.svg\",\n \"search.svg\",\n \"site.svg\",\n \"snippet.svg\",\n \"spinner.svg\",\n \"strikethrough.svg\",\n \"success.svg\",\n \"subscript.svg\",\n \"superscript.svg\",\n \"table.svg\",\n \"tablet-alt.svg\",\n \"tag.svg\",\n \"tasks.svg\",\n \"thumbtack.svg\",\n \"tick-inverse.svg\",\n \"tick.svg\",\n \"time.svg\",\n \"title.svg\",\n \"undo.svg\",\n \"uni52.svg\", # Is this a redundant icon?\n \"upload.svg\",\n \"user.svg\",\n \"view.svg\",\n \"wagtail-inverse.svg\",\n \"wagtail.svg\",\n \"warning.svg\",\n ]:\n icons.append(\"wagtailadmin/icons/{}\".format(icon))\n return icons\n\n\[email protected](\"construct_homepage_summary_items\")\ndef add_pages_summary_item(request, items):\n items.insert(0, PagesSummaryItem(request))\n\n\nclass PageAdminURLFinder:\n def __init__(self, user):\n self.page_perms = user and UserPagePermissionsProxy(user)\n\n def get_edit_url(self, instance):\n if self.page_perms and not self.page_perms.for_page(instance).can_edit():\n return None\n else:\n return reverse(\"wagtailadmin_pages:edit\", args=(instance.pk,))\n\n\nregister_admin_url_finder(Page, PageAdminURLFinder)\n\n\nclass 
CollectionAdminURLFinder(ModelAdminURLFinder):\n permission_policy = collection_permission_policy\n edit_url_name = \"wagtailadmin_collections:edit\"\n\n\nregister_admin_url_finder(Collection, CollectionAdminURLFinder)\n\n\nclass WorkflowAdminURLFinder(ModelAdminURLFinder):\n permission_policy = workflow_permission_policy\n edit_url_name = \"wagtailadmin_workflows:edit\"\n\n\nregister_admin_url_finder(Workflow, WorkflowAdminURLFinder)\n\n\nclass WorkflowTaskAdminURLFinder(ModelAdminURLFinder):\n permission_policy = task_permission_policy\n edit_url_name = \"wagtailadmin_workflows:edit_task\"\n\n\nregister_admin_url_finder(Task, WorkflowTaskAdminURLFinder)\n\n\nfor action_class in [\n DeleteBulkAction,\n MoveBulkAction,\n PublishBulkAction,\n UnpublishBulkAction,\n]:\n hooks.register(\"register_bulk_action\", action_class)\n", "path": "wagtail/admin/wagtail_hooks.py" } ]
[ { "content": "from django.conf import settings\nfrom django.contrib.auth.models import Permission\nfrom django.urls import reverse\nfrom django.utils.http import urlencode\nfrom django.utils.translation import gettext\nfrom django.utils.translation import gettext_lazy as _\nfrom draftjs_exporter.dom import DOM\n\nimport wagtail.admin.rich_text.editors.draftail.features as draftail_features\nfrom wagtail import __version__, hooks\nfrom wagtail.admin.admin_url_finder import (\n ModelAdminURLFinder,\n register_admin_url_finder,\n)\nfrom wagtail.admin.auth import user_has_any_page_permission\nfrom wagtail.admin.forms.collections import GroupCollectionManagementPermissionFormSet\nfrom wagtail.admin.menu import (\n DismissibleMenuItem,\n DismissibleSubmenuMenuItem,\n MenuItem,\n SubmenuMenuItem,\n help_menu,\n reports_menu,\n settings_menu,\n)\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.admin.rich_text.converters.contentstate import link_entity\nfrom wagtail.admin.rich_text.converters.editor_html import (\n LinkTypeRule,\n PageLinkHandler,\n WhitelistRule,\n)\nfrom wagtail.admin.rich_text.converters.html_to_contentstate import (\n BlockElementHandler,\n ExternalLinkElementHandler,\n HorizontalRuleHandler,\n InlineStyleElementHandler,\n ListElementHandler,\n ListItemElementHandler,\n PageLinkElementHandler,\n)\nfrom wagtail.admin.search import SearchArea\nfrom wagtail.admin.site_summary import PagesSummaryItem\nfrom wagtail.admin.ui.sidebar import (\n PageExplorerMenuItem as PageExplorerMenuItemComponent,\n)\nfrom wagtail.admin.ui.sidebar import SubMenuItem as SubMenuItemComponent\nfrom wagtail.admin.views.pages.bulk_actions import (\n DeleteBulkAction,\n MoveBulkAction,\n PublishBulkAction,\n UnpublishBulkAction,\n)\nfrom wagtail.admin.viewsets import viewsets\nfrom wagtail.admin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.models import Collection, Page, Task, UserPagePermissionsProxy, Workflow\nfrom wagtail.permissions import (\n collection_permission_policy,\n task_permission_policy,\n workflow_permission_policy,\n)\nfrom wagtail.templatetags.wagtailcore_tags import (\n wagtail_feature_release_editor_guide_link,\n wagtail_feature_release_whats_new_link,\n)\nfrom wagtail.whitelist import allow_without_attributes, attribute_rule, check_url\n\n\nclass ExplorerMenuItem(MenuItem):\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n def get_context(self, request):\n context = super().get_context(request)\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n context[\"start_page_id\"] = start_page.id\n\n return context\n\n def render_component(self, request):\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n return PageExplorerMenuItemComponent(\n self.name,\n self.label,\n self.url,\n start_page.id,\n icon_name=self.icon_name,\n classnames=self.classnames,\n )\n else:\n return super().render_component(request)\n\n\[email protected](\"register_admin_menu_item\")\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _(\"Pages\"),\n reverse(\"wagtailadmin_explore_root\"),\n name=\"explorer\",\n icon_name=\"folder-open-inverse\",\n order=100,\n )\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n def render_component(self, request):\n return SubMenuItemComponent(\n self.name,\n self.label,\n self.menu.render_component(request),\n icon_name=self.icon_name,\n classnames=self.classnames,\n footer_text=\"Wagtail v\" + __version__,\n )\n\n\[email 
protected](\"register_admin_menu_item\")\ndef register_settings_menu():\n return SettingsMenuItem(_(\"Settings\"), settings_menu, icon_name=\"cogs\", order=10000)\n\n\[email protected](\"register_permissions\")\ndef register_permissions():\n return Permission.objects.filter(\n content_type__app_label=\"wagtailadmin\", codename=\"access_admin\"\n )\n\n\nclass PageSearchArea(SearchArea):\n def __init__(self):\n super().__init__(\n _(\"Pages\"),\n reverse(\"wagtailadmin_pages:search\"),\n name=\"pages\",\n icon_name=\"folder-open-inverse\",\n order=100,\n )\n\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n\[email protected](\"register_admin_search_area\")\ndef register_pages_search_area():\n return PageSearchArea()\n\n\[email protected](\"register_group_permission_panel\")\ndef register_collection_permissions_panel():\n return GroupCollectionManagementPermissionFormSet\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_collections_menu_item():\n return CollectionsMenuItem(\n _(\"Collections\"),\n reverse(\"wagtailadmin_collections:index\"),\n icon_name=\"folder-open-1\",\n order=700,\n )\n\n\nclass WorkflowsMenuItem(MenuItem):\n def is_shown(self, request):\n if not getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n return False\n\n return workflow_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\nclass WorkflowTasksMenuItem(MenuItem):\n def is_shown(self, request):\n if not getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n return False\n\n return task_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_workflows_menu_item():\n return WorkflowsMenuItem(\n _(\"Workflows\"),\n reverse(\"wagtailadmin_workflows:index\"),\n icon_name=\"tasks\",\n order=100,\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_workflow_tasks_menu_item():\n return WorkflowTasksMenuItem(\n _(\"Workflow tasks\"),\n reverse(\"wagtailadmin_workflows:task_index\"),\n icon_name=\"thumbtack\",\n order=150,\n )\n\n\[email protected](\"register_page_listing_buttons\")\ndef page_listing_buttons(page, page_perms, next_url=None):\n if page_perms.can_edit():\n yield PageListingButton(\n _(\"Edit\"),\n reverse(\"wagtailadmin_pages:edit\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Edit '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page.has_unpublished_changes and page.is_previewable():\n yield PageListingButton(\n _(\"View draft\"),\n reverse(\"wagtailadmin_pages:view_draft\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Preview draft version of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n \"rel\": \"noreferrer\",\n },\n priority=20,\n )\n if page.live and page.url:\n yield PageListingButton(\n _(\"View live\"),\n page.url,\n attrs={\n \"rel\": \"noreferrer\",\n \"aria-label\": _(\"View live version of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=30,\n )\n if page_perms.can_add_subpage():\n yield PageListingButton(\n _(\"Add child page\"),\n reverse(\"wagtailadmin_pages:add_subpage\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Add a child page to '%(title)s' \")\n % 
{\"title\": page.get_admin_display_title()}\n },\n priority=40,\n )\n\n yield ButtonWithDropdownFromHook(\n _(\"More\"),\n hook_name=\"register_page_listing_more_buttons\",\n page=page,\n page_perms=page_perms,\n next_url=next_url,\n attrs={\n \"target\": \"_blank\",\n \"rel\": \"noreferrer\",\n \"title\": _(\"View more options for '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=50,\n )\n\n\[email protected](\"register_page_listing_more_buttons\")\ndef page_listing_more_buttons(page, page_perms, next_url=None):\n if page_perms.can_move():\n yield Button(\n _(\"Move\"),\n reverse(\"wagtailadmin_pages:move\", args=[page.id]),\n attrs={\n \"title\": _(\"Move page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page_perms.can_copy():\n url = reverse(\"wagtailadmin_pages:copy\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Copy\"),\n url,\n attrs={\n \"title\": _(\"Copy page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=20,\n )\n if page_perms.can_delete():\n url = reverse(\"wagtailadmin_pages:delete\", args=[page.id])\n include_next_url = True\n\n # After deleting the page, it is impossible to redirect to it.\n if next_url == reverse(\"wagtailadmin_explore\", args=[page.id]):\n include_next_url = False\n\n if next_url and include_next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Delete\"),\n url,\n attrs={\n \"title\": _(\"Delete page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=30,\n )\n if page_perms.can_unpublish():\n url = reverse(\"wagtailadmin_pages:unpublish\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Unpublish\"),\n url,\n attrs={\n \"title\": _(\"Unpublish page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=40,\n )\n if page_perms.can_view_revisions():\n yield Button(\n _(\"History\"),\n reverse(\"wagtailadmin_pages:history\", args=[page.id]),\n attrs={\n \"title\": _(\"View page history for '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=50,\n )\n\n if page_perms.can_reorder_children():\n yield Button(\n _(\"Sort menu order\"),\n \"?ordering=ord\",\n attrs={\n \"title\": _(\"Change ordering of child pages of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=60,\n )\n\n\[email protected](\"register_page_header_buttons\")\ndef page_header_buttons(page, page_perms, next_url=None):\n if page_perms.can_edit():\n yield Button(\n _(\"Edit\"),\n reverse(\"wagtailadmin_pages:edit\", args=[page.id]),\n icon_name=\"edit\",\n attrs={\n \"title\": _(\"Edit '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page_perms.can_move():\n yield Button(\n _(\"Move\"),\n reverse(\"wagtailadmin_pages:move\", args=[page.id]),\n icon_name=\"arrow-right-full\",\n attrs={\n \"title\": _(\"Move page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=20,\n )\n if page_perms.can_copy():\n url = reverse(\"wagtailadmin_pages:copy\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Copy\"),\n url,\n icon_name=\"copy\",\n attrs={\n \"title\": _(\"Copy page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=30,\n )\n if page_perms.can_add_subpage():\n yield Button(\n _(\"Add child 
page\"),\n reverse(\"wagtailadmin_pages:add_subpage\", args=[page.id]),\n icon_name=\"circle-plus\",\n attrs={\n \"aria-label\": _(\"Add a child page to '%(title)s' \")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=40,\n )\n if page_perms.can_delete():\n url = reverse(\"wagtailadmin_pages:delete\", args=[page.id])\n\n include_next_url = True\n\n # After deleting the page, it is impossible to redirect to it.\n if next_url == reverse(\"wagtailadmin_explore\", args=[page.id]):\n include_next_url = False\n\n if next_url == reverse(\"wagtailadmin_pages:edit\", args=[page.id]):\n include_next_url = False\n\n if next_url and include_next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Delete\"),\n url,\n icon_name=\"bin\",\n attrs={\n \"title\": _(\"Delete page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=50,\n )\n if page_perms.can_unpublish():\n url = reverse(\"wagtailadmin_pages:unpublish\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Unpublish\"),\n url,\n icon_name=\"download-alt\",\n attrs={\n \"title\": _(\"Unpublish page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=60,\n )\n if page_perms.can_reorder_children():\n url = reverse(\"wagtailadmin_explore\", args=[page.id])\n url += \"?ordering=ord\"\n yield Button(\n _(\"Sort menu order\"),\n url,\n icon_name=\"list-ul\",\n attrs={\n \"title\": _(\"Change ordering of child pages of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=70,\n )\n\n\[email protected](\"register_admin_urls\")\ndef register_viewsets_urls():\n viewsets.populate()\n return viewsets.get_urlpatterns()\n\n\[email protected](\"register_rich_text_features\")\ndef register_core_features(features):\n features.register_converter_rule(\n \"editorhtml\",\n \"link\",\n [\n WhitelistRule(\"a\", attribute_rule({\"href\": check_url})),\n LinkTypeRule(\"page\", PageLinkHandler),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"bold\",\n [\n WhitelistRule(\"b\", allow_without_attributes),\n WhitelistRule(\"strong\", allow_without_attributes),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"italic\",\n [\n WhitelistRule(\"i\", allow_without_attributes),\n WhitelistRule(\"em\", allow_without_attributes),\n ],\n )\n\n headings_elements = [\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"]\n for order, element in enumerate(headings_elements):\n features.register_converter_rule(\n \"editorhtml\", element, [WhitelistRule(element, allow_without_attributes)]\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"ol\",\n [\n WhitelistRule(\"ol\", allow_without_attributes),\n WhitelistRule(\"li\", allow_without_attributes),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"ul\",\n [\n WhitelistRule(\"ul\", allow_without_attributes),\n WhitelistRule(\"li\", allow_without_attributes),\n ],\n )\n\n # Draftail\n features.register_editor_plugin(\n \"draftail\", \"hr\", draftail_features.BooleanFeature(\"enableHorizontalRule\")\n )\n features.register_converter_rule(\n \"contentstate\",\n \"hr\",\n {\n \"from_database_format\": {\n \"hr\": HorizontalRuleHandler(),\n },\n \"to_database_format\": {\n \"entity_decorators\": {\n \"HORIZONTAL_RULE\": lambda props: DOM.create_element(\"hr\")\n }\n },\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"h1\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h1\",\n \"type\": 
\"header-one\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 1},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h1\",\n {\n \"from_database_format\": {\n \"h1\": BlockElementHandler(\"header-one\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-one\": \"h1\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h2\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h2\",\n \"type\": \"header-two\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 2},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h2\",\n {\n \"from_database_format\": {\n \"h2\": BlockElementHandler(\"header-two\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-two\": \"h2\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h3\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h3\",\n \"type\": \"header-three\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 3},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h3\",\n {\n \"from_database_format\": {\n \"h3\": BlockElementHandler(\"header-three\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-three\": \"h3\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h4\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h4\",\n \"type\": \"header-four\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 4},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h4\",\n {\n \"from_database_format\": {\n \"h4\": BlockElementHandler(\"header-four\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-four\": \"h4\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h5\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h5\",\n \"type\": \"header-five\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 5},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h5\",\n {\n \"from_database_format\": {\n \"h5\": BlockElementHandler(\"header-five\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-five\": \"h5\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h6\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h6\",\n \"type\": \"header-six\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 6},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h6\",\n {\n \"from_database_format\": {\n \"h6\": BlockElementHandler(\"header-six\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-six\": \"h6\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"ul\",\n draftail_features.BlockFeature(\n {\n \"type\": \"unordered-list-item\",\n \"icon\": \"list-ul\",\n \"description\": gettext(\"Bulleted list\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"ul\",\n {\n \"from_database_format\": {\n \"ul\": ListElementHandler(\"unordered-list-item\"),\n \"li\": ListItemElementHandler(),\n },\n \"to_database_format\": {\n \"block_map\": {\"unordered-list-item\": {\"element\": \"li\", \"wrapper\": \"ul\"}}\n },\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"ol\",\n draftail_features.BlockFeature(\n {\n \"type\": \"ordered-list-item\",\n \"icon\": \"list-ol\",\n \"description\": gettext(\"Numbered list\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"ol\",\n {\n \"from_database_format\": {\n \"ol\": ListElementHandler(\"ordered-list-item\"),\n 
\"li\": ListItemElementHandler(),\n },\n \"to_database_format\": {\n \"block_map\": {\"ordered-list-item\": {\"element\": \"li\", \"wrapper\": \"ol\"}}\n },\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"blockquote\",\n draftail_features.BlockFeature(\n {\n \"type\": \"blockquote\",\n \"icon\": \"openquote\",\n \"description\": gettext(\"Blockquote\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"blockquote\",\n {\n \"from_database_format\": {\n \"blockquote\": BlockElementHandler(\"blockquote\"),\n },\n \"to_database_format\": {\"block_map\": {\"blockquote\": \"blockquote\"}},\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"bold\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"BOLD\",\n \"icon\": \"bold\",\n \"description\": gettext(\"Bold\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"bold\",\n {\n \"from_database_format\": {\n \"b\": InlineStyleElementHandler(\"BOLD\"),\n \"strong\": InlineStyleElementHandler(\"BOLD\"),\n },\n \"to_database_format\": {\"style_map\": {\"BOLD\": \"b\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"italic\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"ITALIC\",\n \"icon\": \"italic\",\n \"description\": gettext(\"Italic\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"italic\",\n {\n \"from_database_format\": {\n \"i\": InlineStyleElementHandler(\"ITALIC\"),\n \"em\": InlineStyleElementHandler(\"ITALIC\"),\n },\n \"to_database_format\": {\"style_map\": {\"ITALIC\": \"i\"}},\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"link\",\n draftail_features.EntityFeature(\n {\n \"type\": \"LINK\",\n \"icon\": \"link\",\n \"description\": gettext(\"Link\"),\n # We want to enforce constraints on which links can be pasted into rich text.\n # Keep only the attributes Wagtail needs.\n \"attributes\": [\"url\", \"id\", \"parentId\"],\n \"allowlist\": {\n # Keep pasted links with http/https protocol, and not-pasted links (href = undefined).\n \"href\": \"^(http:|https:|undefined$)\",\n },\n },\n js=[\n \"wagtailadmin/js/page-chooser-modal.js\",\n ],\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"link\",\n {\n \"from_database_format\": {\n \"a[href]\": ExternalLinkElementHandler(\"LINK\"),\n 'a[linktype=\"page\"]': PageLinkElementHandler(\"LINK\"),\n },\n \"to_database_format\": {\"entity_decorators\": {\"LINK\": link_entity}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"superscript\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"SUPERSCRIPT\",\n \"icon\": \"superscript\",\n \"description\": gettext(\"Superscript\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"superscript\",\n {\n \"from_database_format\": {\n \"sup\": InlineStyleElementHandler(\"SUPERSCRIPT\"),\n },\n \"to_database_format\": {\"style_map\": {\"SUPERSCRIPT\": \"sup\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"subscript\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"SUBSCRIPT\",\n \"icon\": \"subscript\",\n \"description\": gettext(\"Subscript\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"subscript\",\n {\n \"from_database_format\": {\n \"sub\": InlineStyleElementHandler(\"SUBSCRIPT\"),\n },\n \"to_database_format\": {\"style_map\": {\"SUBSCRIPT\": \"sub\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"strikethrough\",\n 
draftail_features.InlineStyleFeature(\n {\n \"type\": \"STRIKETHROUGH\",\n \"icon\": \"strikethrough\",\n \"description\": gettext(\"Strikethrough\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"strikethrough\",\n {\n \"from_database_format\": {\n \"s\": InlineStyleElementHandler(\"STRIKETHROUGH\"),\n },\n \"to_database_format\": {\"style_map\": {\"STRIKETHROUGH\": \"s\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"code\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"CODE\",\n \"icon\": \"code\",\n \"description\": gettext(\"Code\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"code\",\n {\n \"from_database_format\": {\n \"code\": InlineStyleElementHandler(\"CODE\"),\n },\n \"to_database_format\": {\"style_map\": {\"CODE\": \"code\"}},\n },\n )\n\n\nclass LockedPagesMenuItem(MenuItem):\n def is_shown(self, request):\n return UserPagePermissionsProxy(request.user).can_remove_locks()\n\n\nclass WorkflowReportMenuItem(MenuItem):\n def is_shown(self, request):\n return getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True)\n\n\nclass SiteHistoryReportMenuItem(MenuItem):\n def is_shown(self, request):\n return UserPagePermissionsProxy(request.user).explorable_pages().exists()\n\n\nclass AgingPagesReportMenuItem(MenuItem):\n def is_shown(self, request):\n return getattr(settings, \"WAGTAIL_AGING_PAGES_ENABLED\", True)\n\n\[email protected](\"register_reports_menu_item\")\ndef register_locked_pages_menu_item():\n return LockedPagesMenuItem(\n _(\"Locked pages\"),\n reverse(\"wagtailadmin_reports:locked_pages\"),\n icon_name=\"lock\",\n order=700,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_workflow_report_menu_item():\n return WorkflowReportMenuItem(\n _(\"Workflows\"),\n reverse(\"wagtailadmin_reports:workflow\"),\n icon_name=\"tasks\",\n order=800,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_workflow_tasks_report_menu_item():\n return WorkflowReportMenuItem(\n _(\"Workflow tasks\"),\n reverse(\"wagtailadmin_reports:workflow_tasks\"),\n icon_name=\"thumbtack\",\n order=900,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_site_history_report_menu_item():\n return SiteHistoryReportMenuItem(\n _(\"Site history\"),\n reverse(\"wagtailadmin_reports:site_history\"),\n icon_name=\"history\",\n order=1000,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_aging_pages_report_menu_item():\n return AgingPagesReportMenuItem(\n _(\"Aging pages\"),\n reverse(\"wagtailadmin_reports:aging_pages\"),\n icon_name=\"time\",\n order=1100,\n )\n\n\[email protected](\"register_admin_menu_item\")\ndef register_reports_menu():\n return SubmenuMenuItem(_(\"Reports\"), reports_menu, icon_name=\"site\", order=9000)\n\n\[email protected](\"register_help_menu_item\")\ndef register_whats_new_in_wagtail_version_menu_item():\n version = \"4.1\"\n return DismissibleMenuItem(\n _(\"What's new in Wagtail %(version)s\") % {\"version\": version},\n wagtail_feature_release_whats_new_link(),\n icon_name=\"help\",\n order=1000,\n attrs={\"target\": \"_blank\", \"rel\": \"noreferrer\"},\n name=f\"whats-new-in-wagtail-{version}\",\n )\n\n\[email protected](\"register_help_menu_item\")\ndef register_editors_guide_menu_item():\n return DismissibleMenuItem(\n _(\"Editor Guide\"),\n wagtail_feature_release_editor_guide_link(),\n icon_name=\"help\",\n order=1100,\n attrs={\"target\": \"_blank\", \"rel\": \"noreferrer\"},\n name=\"editor-guide\",\n 
)\n\n\[email protected](\"register_admin_menu_item\")\ndef register_help_menu():\n return DismissibleSubmenuMenuItem(\n _(\"Help\"),\n help_menu,\n icon_name=\"help\",\n order=11000,\n name=\"help\",\n )\n\n\[email protected](\"register_icons\")\ndef register_icons(icons):\n for icon in [\n \"angle-double-left.svg\",\n \"angle-double-right.svg\",\n \"arrow-down-big.svg\",\n \"arrow-down.svg\",\n \"arrow-right-full.svg\",\n \"arrow-left.svg\",\n \"arrow-right.svg\",\n \"arrow-up-big.svg\",\n \"arrow-up.svg\",\n \"arrows-up-down.svg\",\n \"bars.svg\",\n \"bin.svg\",\n \"bold.svg\",\n \"breadcrumb-expand.svg\",\n \"calendar.svg\",\n \"calendar-alt.svg\",\n \"calendar-check.svg\",\n \"chain-broken.svg\",\n \"check.svg\",\n \"chevron-down.svg\",\n \"circle-check.svg\",\n \"circle-plus.svg\",\n \"circle-xmark.svg\",\n \"clipboard-list.svg\",\n \"code.svg\",\n \"cog.svg\",\n \"cogs.svg\",\n \"copy.svg\",\n \"collapse-down.svg\",\n \"collapse-up.svg\",\n \"comment.svg\",\n \"comment-add.svg\",\n \"comment-add-reversed.svg\",\n \"cross.svg\",\n \"cut.svg\",\n \"date.svg\",\n \"desktop.svg\",\n \"doc-empty-inverse.svg\",\n \"doc-empty.svg\",\n \"doc-full-inverse.svg\",\n \"doc-full.svg\", # aka file-text-alt\n \"dots-vertical.svg\",\n \"dots-horizontal.svg\",\n \"download-alt.svg\",\n \"download.svg\",\n \"draft.svg\",\n \"duplicate.svg\",\n \"edit.svg\",\n \"ellipsis-v.svg\",\n \"expand-right.svg\",\n \"error.svg\",\n \"folder-inverse.svg\",\n \"folder-open-1.svg\",\n \"folder-open-inverse.svg\",\n \"folder.svg\",\n \"form.svg\",\n \"globe.svg\",\n \"grip.svg\",\n \"group.svg\",\n \"h1.svg\",\n \"h2.svg\",\n \"h3.svg\",\n \"h4.svg\",\n \"h5.svg\",\n \"h6.svg\",\n \"help.svg\",\n \"history.svg\",\n \"home.svg\",\n \"horizontalrule.svg\",\n \"image.svg\", # aka picture\n \"info-circle.svg\",\n \"italic.svg\",\n \"link.svg\",\n \"link-external.svg\",\n \"list-ol.svg\",\n \"list-ul.svg\",\n \"lock-open.svg\",\n \"lock.svg\",\n \"login.svg\",\n \"logout.svg\",\n \"mail.svg\",\n \"media.svg\",\n \"minus.svg\",\n \"mobile-alt.svg\",\n \"no-view.svg\",\n \"openquote.svg\",\n \"order-down.svg\",\n \"order-up.svg\",\n \"order.svg\",\n \"password.svg\",\n \"pick.svg\",\n \"pilcrow.svg\",\n \"placeholder.svg\", # aka marquee\n \"plus-inverse.svg\",\n \"plus.svg\",\n \"radio-empty.svg\",\n \"radio-full.svg\",\n \"redirect.svg\",\n \"repeat.svg\",\n \"reset.svg\",\n \"resubmit.svg\",\n \"rotate.svg\",\n \"search.svg\",\n \"site.svg\",\n \"snippet.svg\",\n \"spinner.svg\",\n \"strikethrough.svg\",\n \"success.svg\",\n \"subscript.svg\",\n \"superscript.svg\",\n \"table.svg\",\n \"tablet-alt.svg\",\n \"tag.svg\",\n \"tasks.svg\",\n \"thumbtack.svg\",\n \"thumbtack-crossed.svg\",\n \"tick-inverse.svg\",\n \"tick.svg\",\n \"time.svg\",\n \"title.svg\",\n \"undo.svg\",\n \"uni52.svg\", # Is this a redundant icon?\n \"upload.svg\",\n \"user.svg\",\n \"view.svg\",\n \"wagtail-inverse.svg\",\n \"wagtail.svg\",\n \"warning.svg\",\n ]:\n icons.append(\"wagtailadmin/icons/{}\".format(icon))\n return icons\n\n\[email protected](\"construct_homepage_summary_items\")\ndef add_pages_summary_item(request, items):\n items.insert(0, PagesSummaryItem(request))\n\n\nclass PageAdminURLFinder:\n def __init__(self, user):\n self.page_perms = user and UserPagePermissionsProxy(user)\n\n def get_edit_url(self, instance):\n if self.page_perms and not self.page_perms.for_page(instance).can_edit():\n return None\n else:\n return reverse(\"wagtailadmin_pages:edit\", args=(instance.pk,))\n\n\nregister_admin_url_finder(Page, 
PageAdminURLFinder)\n\n\nclass CollectionAdminURLFinder(ModelAdminURLFinder):\n permission_policy = collection_permission_policy\n edit_url_name = \"wagtailadmin_collections:edit\"\n\n\nregister_admin_url_finder(Collection, CollectionAdminURLFinder)\n\n\nclass WorkflowAdminURLFinder(ModelAdminURLFinder):\n permission_policy = workflow_permission_policy\n edit_url_name = \"wagtailadmin_workflows:edit\"\n\n\nregister_admin_url_finder(Workflow, WorkflowAdminURLFinder)\n\n\nclass WorkflowTaskAdminURLFinder(ModelAdminURLFinder):\n permission_policy = task_permission_policy\n edit_url_name = \"wagtailadmin_workflows:edit_task\"\n\n\nregister_admin_url_finder(Task, WorkflowTaskAdminURLFinder)\n\n\nfor action_class in [\n DeleteBulkAction,\n MoveBulkAction,\n PublishBulkAction,\n UnpublishBulkAction,\n]:\n hooks.register(\"register_bulk_action\", action_class)\n", "path": "wagtail/admin/wagtail_hooks.py" } ]
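The `wagtail/admin/wagtail_hooks.py` listing above wires the admin UI together through Wagtail's `@hooks.register(...)` decorator (admin menu items, page listing buttons, rich text features, icons, bulk actions). For reference, a minimal sketch of that registration pattern, using a hypothetical extra menu item; the hook name, `MenuItem` signature, and imports follow the ones used throughout the file:

```python
from django.urls import reverse

from wagtail import hooks
from wagtail.admin.menu import MenuItem


# Hypothetical example: register one more admin menu item, the same way
# the listing above registers "Pages", "Settings", "Reports" and "Help".
@hooks.register("register_admin_menu_item")
def register_example_menu_item():
    return MenuItem(
        "Example",                      # label shown in the admin sidebar
        reverse("wagtailadmin_home"),   # any resolvable admin URL works here
        icon_name="folder-open-inverse",
        order=12000,                    # placed after the built-in items
    )
```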
diff --git a/CHANGELOG.txt b/CHANGELOG.txt index bb125df699e7..c8702de48f44 100644 --- a/CHANGELOG.txt +++ b/CHANGELOG.txt @@ -25,6 +25,8 @@ Changelog * Implement latest design for the admin dashboard header (Thibaud Colas, Steven Steinwand) * Add base Axe accessibility checker integration within userbar, with error count (Albina Starykova) * Allow configuring Axe accessibility checker integration via `construct_wagtail_userbar` hook (Sage Abdullah) + * Support pinning and un-pinning the rich text editor toolbar depending on user preference (Thibaud Colas) + * Make the rich text block trigger and slash-commands always available regardless of where the cursor is (Thibaud Colas) * Fix: Make sure workflow timeline icons are visible in high-contrast mode (Loveth Omokaro) * Fix: Ensure authentication forms (login, password reset) have a visible border in Windows high-contrast mode (Loveth Omokaro) * Fix: Ensure visual consistency between buttons and links as buttons in Windows high-contrast mode (Albina Starykova) @@ -63,6 +65,9 @@ Changelog * Fix: Resolve issue where workflow and other notification emails would not include the correct tab URL for account notification management (LB (Ben) Johnston) * Fix: Use consistent spacing above and below page headers (Thibaud Colas) * Fix: Use the correct icon sizes and spacing in slim header (Thibaud Colas) + * Fix: Use the correct color for placeholders in rich text fields (Thibaud Colas) + * Fix: Prevent obstructing the outline around rich text fields (Thibaud Colas) + * Fix: Page editor dropdowns now use indigo backgrounds like elsewhere in the admin interface (Thibaud Colas) * Docs: Add custom permissions section to permissions documentation page (Dan Hayden) * Docs: Add documentation for how to get started with contributing translations for the Wagtail admin (Ogunbanjo Oluwadamilare) * Docs: Officially recommend `fnm` over `nvm` in development documentation (LB (Ben) Johnston) diff --git a/CONTRIBUTORS.rst b/CONTRIBUTORS.rst index 55e3b4e7537c..b1d6eaebc019 100644 --- a/CONTRIBUTORS.rst +++ b/CONTRIBUTORS.rst @@ -682,6 +682,7 @@ Contributors * Jhonatan Lopes * Alex Simpson * GLEF1X +* Nick Lee Translators =========== diff --git a/client/src/components/Draftail/Draftail.scss b/client/src/components/Draftail/Draftail.scss index 7329b5d013be..9f6bc5f56239 100644 --- a/client/src/components/Draftail/Draftail.scss +++ b/client/src/components/Draftail/Draftail.scss @@ -1,4 +1,5 @@ $draftail-editor-text: $color-input-text; +$draftail-placeholder-text: theme('colors.grey.400'); // w-body-text-large $draftail-editor-font-size: theme('fontSize.18'); $draftail-editor-line-height: theme('lineHeight.normal'); @@ -76,9 +77,14 @@ $draftail-editor-font-family: $font-sans; } .Draftail-Editor { + @include input-base(); // Number used inside a `calc` function, which doesn’t support unitless zero. // stylelint-disable-next-line length-zero-no-unit --draftail-offset-inline-start: 0px; + + &--focus { + outline: $focus-outline-width solid $color-focus-outline; + } } .Draftail-Editor__wrapper { @@ -107,13 +113,37 @@ $draftail-editor-font-family: $font-sans; } .Draftail-Toolbar { - border: 1px solid $color-grey-3; + border-width: 0; + // Remove once we drop support for Safari 14. + // stylelint-disable-next-line property-disallowed-list + border-bottom-left-radius: 0; + border-end-start-radius: 0; + // Remove once we drop support for Safari 14. 
+ // stylelint-disable-next-line property-disallowed-list + border-bottom-right-radius: 0; + border-end-end-radius: 0; + background-color: $draftail-editor-background; + color: $draftail-placeholder-text; + + .Draftail-Editor--focus & { + color: $draftail-editor-text; + top: calc(theme('spacing.slim-header') * 2); + + @include media-breakpoint-up(sm) { + top: theme('spacing.slim-header'); + } + } } .Draftail-MetaToolbar { position: absolute; inset-inline-end: 0; visibility: hidden; + background-color: transparent; + + &:empty { + display: none; + } // Make sure the toolbar is always visible for devices that do not hover. @media (hover: hover) { @@ -233,13 +263,6 @@ $draftail-editor-font-family: $font-sans; display: none; } -.Draftail-ToolbarGroup--controls::before { - display: inline-block; - height: 1.875rem; - background-color: $color-white; - opacity: 0.2; -} - .Draftail-ToolbarButton { height: 1.875rem; min-width: 1.875rem; @@ -260,9 +283,29 @@ $draftail-editor-font-family: $font-sans; } } -.Draftail-Editor__wrapper .public-DraftEditor-content { - @include input-base(); - @include show-focus-outline-inside(); +.Draftail-ToolbarButton--pin { + min-width: theme('spacing.6'); + height: theme('spacing.6'); + border: 1px solid theme('colors.primary.DEFAULT'); + + &:hover { + border-color: theme('colors.primary.DEFAULT'); + } + + .Draftail-Toolbar & { + border-color: $color-input-border; + background-color: theme('colors.grey.50'); + border-top-width: 0; + border-inline-end-width: 0; + + .Draftail-Editor:hover & { + border-color: $color-input-hover-border; + } + } + + .icon { + transform: rotate(30deg); + } } .Draftail-block--blockquote { diff --git a/client/src/components/Draftail/index.js b/client/src/components/Draftail/index.js index 99aafed20b73..f8ec2ae977c9 100644 --- a/client/src/components/Draftail/index.js +++ b/client/src/components/Draftail/index.js @@ -38,6 +38,49 @@ const BR_ICON = 'M.436 633.471l296.897-296.898v241.823h616.586V94.117h109.517v593.796H297.333v242.456z'; const ADD_ICON = <Icon name="plus" />; +const pinButton = { + floatingIcon: <Icon name="thumbtack" />, + stickyIcon: <Icon name="thumbtack-crossed" />, + floatingDescription: gettext('Pin toolbar'), + stickyDescription: gettext('Unpin toolbar'), +}; + +const getSavedToolbar = () => { + let saved = 'floating'; + try { + saved = localStorage.getItem('wagtail:draftail-toolbar') || saved; + } catch { + // Use the default if localStorage isn’t available. + } + return saved; +}; + +/** + * Scroll to keep the field on the same spot when switching toolbars, + * and save the choice in localStorage. + */ +const onSetToolbar = (choice, callback) => { + const activeEditor = document.activeElement; + const before = activeEditor.getBoundingClientRect().top; + callback(choice); + + // Delay scrolling until reflow has been fully computed. + requestAnimationFrame(() => { + const after = activeEditor.getBoundingClientRect().top; + const scrollArea = document.querySelector('#main'); + scrollArea.scrollBy({ + // Scroll by a positive amount if the editor moved down, negative if up. + top: after - before, + behavior: 'instant', + }); + }); + try { + localStorage.setItem('wagtail:draftail-toolbar', choice); + } catch { + // Skip saving the preference if localStorage isn’t available. + } +}; + /** * Registry for client-side code of Draftail plugins. 
*/ @@ -153,7 +196,12 @@ const initEditor = (selector, originalOptions, currentScript) => { comboPlaceholder={gettext('Search blocks')} noResultsText={gettext('No results')} /> - <InlineToolbar {...props} /> + <InlineToolbar + {...props} + pinButton={pinButton} + defaultToolbar={getSavedToolbar()} + onSetToolbar={onSetToolbar} + /> </> ), bottomToolbar: MetaToolbar, diff --git a/client/src/entrypoints/admin/telepath/widgets.js b/client/src/entrypoints/admin/telepath/widgets.js index 34a0d5613e5f..499c366d15eb 100644 --- a/client/src/entrypoints/admin/telepath/widgets.js +++ b/client/src/entrypoints/admin/telepath/widgets.js @@ -197,9 +197,8 @@ class DraftailInsertBlockCommand { } onSelect({ editorState }) { - // Reset the current block to unstyled and empty before splitting, so we remove the command prompt if used. const result = window.draftail.splitState( - window.draftail.DraftUtils.resetBlockWithType(editorState, 'unstyled'), + window.draftail.DraftUtils.removeCommandPalettePrompt(editorState), ); if (result.stateAfter.getCurrentContent().hasText()) { // There is content after the insertion point, so need to split the existing block. @@ -246,7 +245,7 @@ class DraftailSplitCommand { onSelect({ editorState }) { const result = window.draftail.splitState( - window.draftail.DraftUtils.resetBlockWithType(editorState, 'unstyled'), + window.draftail.DraftUtils.removeCommandPalettePrompt(editorState), ); // Run the split after a timeout to circumvent potential race condition. setTimeout(() => { diff --git a/docs/releases/4.0.md b/docs/releases/4.0.md index b55b54aa5309..af43e68a7bdc 100644 --- a/docs/releases/4.0.md +++ b/docs/releases/4.0.md @@ -37,6 +37,8 @@ Following from Wagtail 3.0, this release contains significant UI changes that af Further updates to the page editor are expected in the next release. Those changes were implemented by Thibaud Colas. Development on this feature was sponsored by Google. +(rich_text_improvements_4)= + ### Rich text improvements As part of the page editor redesign project sponsored by Google, we have made a number of improvements to our rich text editor: diff --git a/docs/releases/4.2.md b/docs/releases/4.2.md index 72457b4c8378..0698e0291647 100644 --- a/docs/releases/4.2.md +++ b/docs/releases/4.2.md @@ -27,6 +27,16 @@ Wagtail now provides a `fullpageurl` template tag (for both Django templates and This feature was developed by Jake Howard. +### Rich text improvements + +Following feedback from Wagtail users on [rich text UI improvements in Wagtail 4.0](rich_text_improvements_4), we have further refined the behavior of rich text fields to cater for different scenarios: + +- Users can now choose between an “inline” floating toolbar, and a fixed toolbar at the top of the editor. Both toolbars display all formatting options. +- The ‘/’ command palette and block picker in rich text fields now contain all formatting options except text styles. +- The ‘/’ command palette and block picker are now always available no matter where the cursor is placed, to support inserting content at any point within text, transforming existing content, and splitting StreamField blocks in the middle of a paragraph when needed. + +Thank you to all who provided feedback, participants to our usability testing sessions, and to Nick Lee and Thibaud Colas for the implementation. 
+ ### Other features * Test assertion [`WagtailPageTestCase.assertCanCreate`](testing_reference) now supports the kwarg `publish=True` to check publish redirection (Harry Percival, Akua Dokua Asiedu) @@ -87,6 +97,9 @@ This feature was developed by Jake Howard. * Resolve issue where workflow and other notification emails would not include the correct tab URL for account notification management (LB (Ben) Johnston) * Use consistent spacing above and below page headers (Thibaud Colas) * Use the correct icon sizes and spacing in slim header (Thibaud Colas) + * Use the correct color for placeholders in rich text fields (Thibaud Colas) + * Prevent obstructing the outline around rich text fields (Thibaud Colas) + * Page editor dropdowns now use indigo backgrounds like elsewhere in the admin interface (Thibaud Colas) ### Documentation diff --git a/package-lock.json b/package-lock.json index a30ff034f852..e386ee7eaa20 100644 --- a/package-lock.json +++ b/package-lock.json @@ -13,7 +13,7 @@ "a11y-dialog": "^7.4.0", "axe-core": "^4.6.2", "draft-js": "^0.10.5", - "draftail": "^2.0.0-rc.2", + "draftail": "^2.0.0-rc.5", "draftjs-filters": "^3.0.1", "focus-trap-react": "^8.4.2", "immer": "^9.0.6", @@ -16540,6 +16540,7 @@ }, "node_modules/compute-scroll-into-view": { "version": "1.0.17", + "dev": true, "license": "MIT" }, "node_modules/concat-map": { @@ -17962,6 +17963,7 @@ }, "node_modules/downshift": { "version": "6.1.7", + "dev": true, "license": "MIT", "dependencies": { "@babel/runtime": "^7.14.8", @@ -18004,13 +18006,13 @@ } }, "node_modules/draftail": { - "version": "2.0.0-rc.2", - "resolved": "https://registry.npmjs.org/draftail/-/draftail-2.0.0-rc.2.tgz", - "integrity": "sha512-3KNMXv54k0yxAoOk8Ho9m/YRxJxArv7VwS/3X1yX0Xi2dUzvRzvYW5piGMSIX6vgYoWSN9p5bM+XtytciL93ig==", + "version": "2.0.0-rc.5", + "resolved": "https://registry.npmjs.org/draftail/-/draftail-2.0.0-rc.5.tgz", + "integrity": "sha512-t4o+483o7DY+7taNP6adgh2FAp4VBi0WxcteilPZdRZaotv3ePsLV5TPtfLiQtS4KGgGyP+RiGmPfPjJ/Ycbvg==", "dependencies": { "@tippyjs/react": "^4.2.6", "decorate-component-with-props": "^1.0.2", - "downshift": "^6.1.7", + "downshift": "^7.0.4", "draft-js-plugins-editor": "^2.1.1", "draftjs-conductor": "^3.0.0", "draftjs-filters": "^3.0.1" @@ -18021,6 +18023,26 @@ "react-dom": "^16.6.0" } }, + "node_modules/draftail/node_modules/compute-scroll-into-view": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-2.0.4.tgz", + "integrity": "sha512-y/ZA3BGnxoM/QHHQ2Uy49CLtnWPbt4tTPpEEZiEmmiWBFKjej7nEyH8Ryz54jH0MLXflUYA3Er2zUxPSJu5R+g==" + }, + "node_modules/draftail/node_modules/downshift": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.2.0.tgz", + "integrity": "sha512-dEn1Sshe7iTelUhmdbmiJhtIiwIBxBV8p15PuvEBh0qZcHXZnEt0geuCIIkCL4+ooaKRuLE0Wc+Fz9SwWuBIyg==", + "dependencies": { + "@babel/runtime": "^7.14.8", + "compute-scroll-into-view": "^2.0.4", + "prop-types": "^15.7.2", + "react-is": "^17.0.2", + "tslib": "^2.3.0" + }, + "peerDependencies": { + "react": ">=16.12.0" + } + }, "node_modules/draftjs-conductor": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/draftjs-conductor/-/draftjs-conductor-3.0.0.tgz", @@ -43078,7 +43100,8 @@ } }, "compute-scroll-into-view": { - "version": "1.0.17" + "version": "1.0.17", + "dev": true }, "concat-map": { "version": "0.0.1", @@ -44064,6 +44087,7 @@ }, "downshift": { "version": "6.1.7", + "dev": true, "requires": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": 
"^1.0.17", @@ -44091,16 +44115,35 @@ } }, "draftail": { - "version": "2.0.0-rc.2", - "resolved": "https://registry.npmjs.org/draftail/-/draftail-2.0.0-rc.2.tgz", - "integrity": "sha512-3KNMXv54k0yxAoOk8Ho9m/YRxJxArv7VwS/3X1yX0Xi2dUzvRzvYW5piGMSIX6vgYoWSN9p5bM+XtytciL93ig==", + "version": "2.0.0-rc.5", + "resolved": "https://registry.npmjs.org/draftail/-/draftail-2.0.0-rc.5.tgz", + "integrity": "sha512-t4o+483o7DY+7taNP6adgh2FAp4VBi0WxcteilPZdRZaotv3ePsLV5TPtfLiQtS4KGgGyP+RiGmPfPjJ/Ycbvg==", "requires": { "@tippyjs/react": "^4.2.6", "decorate-component-with-props": "^1.0.2", - "downshift": "^6.1.7", + "downshift": "^7.0.4", "draft-js-plugins-editor": "^2.1.1", "draftjs-conductor": "^3.0.0", "draftjs-filters": "^3.0.1" + }, + "dependencies": { + "compute-scroll-into-view": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-2.0.4.tgz", + "integrity": "sha512-y/ZA3BGnxoM/QHHQ2Uy49CLtnWPbt4tTPpEEZiEmmiWBFKjej7nEyH8Ryz54jH0MLXflUYA3Er2zUxPSJu5R+g==" + }, + "downshift": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.2.0.tgz", + "integrity": "sha512-dEn1Sshe7iTelUhmdbmiJhtIiwIBxBV8p15PuvEBh0qZcHXZnEt0geuCIIkCL4+ooaKRuLE0Wc+Fz9SwWuBIyg==", + "requires": { + "@babel/runtime": "^7.14.8", + "compute-scroll-into-view": "^2.0.4", + "prop-types": "^15.7.2", + "react-is": "^17.0.2", + "tslib": "^2.3.0" + } + } } }, "draftjs-conductor": { diff --git a/package.json b/package.json index 64401da636db..41568d12db28 100644 --- a/package.json +++ b/package.json @@ -106,7 +106,7 @@ "a11y-dialog": "^7.4.0", "axe-core": "^4.6.2", "draft-js": "^0.10.5", - "draftail": "^2.0.0-rc.2", + "draftail": "^2.0.0-rc.5", "draftjs-filters": "^3.0.1", "focus-trap-react": "^8.4.2", "immer": "^9.0.6", diff --git a/wagtail/admin/templates/wagtailadmin/icons/thumbtack-crossed.svg b/wagtail/admin/templates/wagtailadmin/icons/thumbtack-crossed.svg new file mode 100644 index 000000000000..b675f7f8bdf0 --- /dev/null +++ b/wagtail/admin/templates/wagtailadmin/icons/thumbtack-crossed.svg @@ -0,0 +1,3 @@ +<svg xmlns="http://www.w3.org/2000/svg" id="icon-thumbtack-crossed" viewBox="0 0 16 15"> + <path d="M12.557 7.635H3.032a2.859 2.859 0 0 0-.486 1.578c0 .383.274.656.656.656h3.72v2.953l.655 1.313c.082.164.329.164.41 0l.657-1.313c0-.014.007-.027.014-.041a.097.097 0 0 0 .013-.041V9.869h3.719a.648.648 0 0 0 .656-.656c0-.583-.181-1.114-.489-1.578ZM10.34 2.869l.184 1.69H5.04l.185-1.69H4.077a.632.632 0 0 1-.656-.656V.9c0-.355.274-.656.656-.656h7.438c.355 0 .656.3.656.656v1.313c0 .382-.3.656-.656.656h-1.176ZM0 5.56h16v1.08H0V5.56Z"/> +</svg> diff --git a/wagtail/admin/wagtail_hooks.py b/wagtail/admin/wagtail_hooks.py index 8689b9aa7989..2cf097d696e2 100644 --- a/wagtail/admin/wagtail_hooks.py +++ b/wagtail/admin/wagtail_hooks.py @@ -1126,6 +1126,7 @@ def register_icons(icons): "tag.svg", "tasks.svg", "thumbtack.svg", + "thumbtack-crossed.svg", "tick-inverse.svg", "tick.svg", "time.svg",
scipy__scipy-8316
Shape bug in scipy.optimize.least_squares (trf) with custom loss function and jac as linear operator I believe there is a shape/broadcasting bug in the scipy.optimize._lsq.common::left_multiplied_operator's matmat implementation, which causes a crash when using scipy.optimize.least_squares with a jacobian passed as a linearoperator and a custom loss function ### Reproducing code example: ``` import numpy as np import scipy.optimize from scipy.optimize._numdiff import approx_derivative from scipy.sparse.linalg import aslinearoperator X = np.linspace(1,10) Y = 2*X + 1 + 0.01 * np.random.RandomState(0).randn(len(X)) def fun(p): return X*p[0] + p[1] - Y def jac(x): return aslinearoperator(approx_derivative(fun, x)) r = scipy.optimize.least_squares( fun=lambda p: X*p[0] + p[1] - Y, jac=jac, x0=[0, 0], loss='huber', ) ``` ### Error message: ``` Traceback (most recent call last): File "example.py", line 19, in <module> loss='huber, File "/prefix/lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.py", line 908, in least_squares tr_options.copy(), verbose) File "/prefix/lib/python2.7/site-packages/scipy/optimize/_lsq/trf.py", line 124, in trf loss_function, tr_solver, tr_options, verbose) File "/prefix/lib/python2.7/site-packages/scipy/optimize/_lsq/trf.py", line 491, in trf_no_bounds JS = J_h.dot(S) File "/prefix/lib/python2.7/site-packages/scipy/sparse/linalg/interface.py", line 364, in dot return self.matmat(x) File "/prefix/lib/python2.7/site-packages/scipy/sparse/linalg/interface.py", line 326, in matmat Y = self._matmat(X) File "/prefix/lib/python2.7/site-packages/scipy/sparse/linalg/interface.py", line 466, in _matmat return self.__matmat_impl(X) File "/prefix/lib/python2.7/site-packages/scipy/optimize/_lsq/common.py", line 642, in matmat return J.matmat(X * d[:, np.newaxis]) File "/prefix/lib/python2.7/site-packages/scipy/sparse/linalg/interface.py", line 326, in matmat Y = self._matmat(X) File "/prefix/lib/python2.7/site-packages/scipy/sparse/linalg/interface.py", line 466, in _matmat return self.__matmat_impl(X) File "/prefix/lib/python2.7/site-packages/scipy/optimize/_lsq/common.py", line 625, in matmat return d * J.matmat(X) ValueError: operands could not be broadcast together with shapes (50,) (50,2) ``` ### Scipy/Numpy/Python version information: ``` $ python -c 'import sys, scipy, numpy; print(scipy.__version__, numpy.__version__, sys.version_info)' ('0.19.1', '1.13.1', sys.version_info(major=2, minor=7, micro=14, releaselevel='final', serial=0)) ```
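The broadcast failure in the last traceback frame comes down to multiplying a 1-D scaling vector `d` of shape `(m,)` with a 2-D result of shape `(m, k)`: NumPy aligns trailing axes, so `(50,) * (50, 2)` fails, whereas adding a trailing axis to `d` scales the rows as intended (i.e. computes `diag(d) @ A`). A minimal standalone illustration of the shape rule involved (not part of the original report):

```python
import numpy as np

m, k = 50, 2
d = np.ones(m)          # per-row scale factors, shape (m,)
A = np.ones((m, k))     # stands in for J.matmat(X), shape (m, k)

# The failing expression inside left_multiplied_operator.matmat:
# d * A  ->  ValueError: operands could not be broadcast together
#            with shapes (50,) (50,2)

# Broadcasting d over the columns gives the intended diag(d) @ A:
scaled = d[:, np.newaxis] * A
assert scaled.shape == (m, k)
assert np.allclose(scaled, np.diag(d) @ A)
```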
[ { "content": "\"\"\"Functions used by least-squares algorithms.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom math import copysign\n\nimport numpy as np\nfrom numpy.linalg import norm\n\nfrom scipy.linalg import cho_factor, cho_solve, LinAlgError\nfrom scipy.sparse import issparse\nfrom scipy.sparse.linalg import LinearOperator, aslinearoperator\n\n\nEPS = np.finfo(float).eps\n\n\n# Functions related to a trust-region problem.\n\n\ndef intersect_trust_region(x, s, Delta):\n \"\"\"Find the intersection of a line with the boundary of a trust region.\n \n This function solves the quadratic equation with respect to t\n ||(x + s*t)||**2 = Delta**2.\n \n Returns\n -------\n t_neg, t_pos : tuple of float\n Negative and positive roots.\n \n Raises\n ------\n ValueError\n If `s` is zero or `x` is not within the trust region.\n \"\"\"\n a = np.dot(s, s)\n if a == 0:\n raise ValueError(\"`s` is zero.\")\n\n b = np.dot(x, s)\n\n c = np.dot(x, x) - Delta**2\n if c > 0:\n raise ValueError(\"`x` is not within the trust region.\")\n\n d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.\n\n # Computations below avoid loss of significance, see \"Numerical Recipes\".\n q = -(b + copysign(d, b))\n t1 = q / a\n t2 = c / q\n\n if t1 < t2:\n return t1, t2\n else:\n return t2, t1\n\n\ndef solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,\n rtol=0.01, max_iter=10):\n \"\"\"Solve a trust-region problem arising in least-squares minimization.\n \n This function implements a method described by J. J. More [1]_ and used\n in MINPACK, but it relies on a single SVD of Jacobian instead of series\n of Cholesky decompositions. Before running this function, compute:\n ``U, s, VT = svd(J, full_matrices=False)``.\n \n Parameters\n ----------\n n : int\n Number of variables.\n m : int\n Number of residuals.\n uf : ndarray\n Computed as U.T.dot(f).\n s : ndarray\n Singular values of J.\n V : ndarray\n Transpose of VT.\n Delta : float\n Radius of a trust region.\n initial_alpha : float, optional\n Initial guess for alpha, which might be available from a previous\n iteration. If None, determined automatically.\n rtol : float, optional\n Stopping tolerance for the root-finding procedure. Namely, the\n solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.\n max_iter : int, optional\n Maximum allowed number of iterations for the root-finding procedure.\n \n Returns\n -------\n p : ndarray, shape (n,)\n Found solution of a trust-region problem.\n alpha : float\n Positive value such that (J.T*J + alpha*I)*p = -J.T*f.\n Sometimes called Levenberg-Marquardt parameter.\n n_iter : int\n Number of iterations made by root-finding procedure. Zero means\n that Gauss-Newton step was selected as the solution.\n \n References\n ----------\n .. [1] More, J. J., \"The Levenberg-Marquardt Algorithm: Implementation\n and Theory,\" Numerical Analysis, ed. G. A. Watson, Lecture Notes\n in Mathematics 630, Springer Verlag, pp. 105-116, 1977.\n \"\"\"\n def phi_and_derivative(alpha, suf, s, Delta):\n \"\"\"Function of which to find zero.\n \n It is defined as \"norm of regularized (by alpha) least-squares\n solution minus `Delta`\". 
Refer to [1]_.\n \"\"\"\n denom = s**2 + alpha\n p_norm = norm(suf / denom)\n phi = p_norm - Delta\n phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm\n return phi, phi_prime\n\n suf = s * uf\n\n # Check if J has full rank and try Gauss-Newton step.\n if m >= n:\n threshold = EPS * m * s[0]\n full_rank = s[-1] > threshold\n else:\n full_rank = False\n\n if full_rank:\n p = -V.dot(uf / s)\n if norm(p) <= Delta:\n return p, 0.0, 0\n\n alpha_upper = norm(suf) / Delta\n\n if full_rank:\n phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)\n alpha_lower = -phi / phi_prime\n else:\n alpha_lower = 0.0\n\n if initial_alpha is None or not full_rank and initial_alpha == 0:\n alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)\n else:\n alpha = initial_alpha\n\n for it in range(max_iter):\n if alpha < alpha_lower or alpha > alpha_upper:\n alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)\n\n phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)\n\n if phi < 0:\n alpha_upper = alpha\n\n ratio = phi / phi_prime\n alpha_lower = max(alpha_lower, alpha - ratio)\n alpha -= (phi + Delta) * ratio / Delta\n\n if np.abs(phi) < rtol * Delta:\n break\n\n p = -V.dot(suf / (s**2 + alpha))\n\n # Make the norm of p equal to Delta, p is changed only slightly during\n # this. It is done to prevent p lie outside the trust region (which can\n # cause problems later).\n p *= Delta / norm(p)\n\n return p, alpha, it + 1\n\n\ndef solve_trust_region_2d(B, g, Delta):\n \"\"\"Solve a general trust-region problem in 2 dimensions.\n \n The problem is reformulated as a 4-th order algebraic equation,\n the solution of which is found by numpy.roots.\n \n Parameters\n ----------\n B : ndarray, shape (2, 2)\n Symmetric matrix, defines a quadratic term of the function.\n g : ndarray, shape (2,)\n Defines a linear term of the function.\n Delta : float\n Radius of a trust region.\n \n Returns\n -------\n p : ndarray, shape (2,)\n Found solution.\n newton_step : bool\n Whether the returned solution is the Newton step which lies within\n the trust region.\n \"\"\"\n try:\n R, lower = cho_factor(B)\n p = -cho_solve((R, lower), g)\n if np.dot(p, p) <= Delta**2:\n return p, True\n except LinAlgError:\n pass\n\n a = B[0, 0] * Delta**2\n b = B[0, 1] * Delta**2\n c = B[1, 1] * Delta**2\n\n d = g[0] * Delta\n f = g[1] * Delta\n\n coeffs = np.array(\n [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])\n t = np.roots(coeffs) # Can handle leading zeros.\n t = np.real(t[np.isreal(t)])\n\n p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))\n value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)\n i = np.argmin(value)\n p = p[:, i]\n\n return p, False\n\n\ndef update_tr_radius(Delta, actual_reduction, predicted_reduction,\n step_norm, bound_hit):\n \"\"\"Update the radius of a trust region based on the cost reduction.\n\n Returns\n -------\n Delta : float\n New radius.\n ratio : float\n Ratio between actual and predicted reductions. 
Zero if predicted\n reduction is zero.\n \"\"\"\n if predicted_reduction > 0:\n ratio = actual_reduction / predicted_reduction\n else:\n ratio = 0\n\n if ratio < 0.25:\n Delta = 0.25 * step_norm\n elif ratio > 0.75 and bound_hit:\n Delta *= 2.0\n\n return Delta, ratio\n\n\n# Construction and minimization of quadratic functions.\n\n\ndef build_quadratic_1d(J, g, s, diag=None, s0=None):\n \"\"\"Parameterize a multivariate quadratic function along a line.\n \n The resulting univariate quadratic function is given as follows:\n ::\n f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +\n g.T * (s0 + s*t)\n \n Parameters\n ----------\n J : ndarray, sparse matrix or LinearOperator shape (m, n)\n Jacobian matrix, affects the quadratic term.\n g : ndarray, shape (n,)\n Gradient, defines the linear term.\n s : ndarray, shape (n,)\n Direction vector of a line.\n diag : None or ndarray with shape (n,), optional\n Addition diagonal part, affects the quadratic term.\n If None, assumed to be 0.\n s0 : None or ndarray with shape (n,), optional\n Initial point. If None, assumed to be 0.\n \n Returns\n -------\n a : float\n Coefficient for t**2.\n b : float\n Coefficient for t.\n c : float\n Free term. Returned only if `s0` is provided.\n \"\"\"\n v = J.dot(s)\n a = np.dot(v, v)\n if diag is not None:\n a += np.dot(s * diag, s)\n a *= 0.5\n\n b = np.dot(g, s)\n\n if s0 is not None:\n u = J.dot(s0)\n b += np.dot(u, v)\n c = 0.5 * np.dot(u, u) + np.dot(g, s0)\n if diag is not None:\n b += np.dot(s0 * diag, s)\n c += 0.5 * np.dot(s0 * diag, s0)\n return a, b, c\n else:\n return a, b\n\n\ndef minimize_quadratic_1d(a, b, lb, ub, c=0):\n \"\"\"Minimize a 1-d quadratic function subject to bounds.\n \n The free term `c` is 0 by default. Bounds must be finite.\n \n Returns\n -------\n t : float\n Minimum point.\n y : float\n Minimum value.\n \"\"\"\n t = [lb, ub]\n if a != 0:\n extremum = -0.5 * b / a\n if lb < extremum < ub:\n t.append(extremum)\n t = np.asarray(t)\n y = a * t**2 + b * t + c\n min_index = np.argmin(y)\n return t[min_index], y[min_index]\n\n\ndef evaluate_quadratic(J, g, s, diag=None):\n \"\"\"Compute values of a quadratic function arising in least squares.\n \n The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.\n \n Parameters\n ----------\n J : ndarray, sparse matrix or LinearOperator, shape (m, n)\n Jacobian matrix, affects the quadratic term.\n g : ndarray, shape (n,)\n Gradient, defines the linear term.\n s : ndarray, shape (k, n) or (n,)\n Array containing steps as rows.\n diag : ndarray, shape (n,), optional\n Addition diagonal part, affects the quadratic term.\n If None, assumed to be 0.\n \n Returns\n -------\n values : ndarray with shape (k,) or float\n Values of the function. If `s` was 2-dimensional then ndarray is\n returned, otherwise float is returned.\n \"\"\"\n if s.ndim == 1:\n Js = J.dot(s)\n q = np.dot(Js, Js)\n if diag is not None:\n q += np.dot(s * diag, s)\n else:\n Js = J.dot(s.T)\n q = np.sum(Js**2, axis=0)\n if diag is not None:\n q += np.sum(diag * s**2, axis=1)\n\n l = np.dot(s, g)\n\n return 0.5 * q + l\n\n\n# Utility functions to work with bound constraints.\n\n\ndef in_bounds(x, lb, ub):\n \"\"\"Check if a point lies within bounds.\"\"\"\n return np.all((x >= lb) & (x <= ub))\n\n\ndef step_size_to_bound(x, s, lb, ub):\n \"\"\"Compute a min_step size required to reach a bound.\n \n The function computes a positive scalar t, such that x + s * t is on\n the bound.\n \n Returns\n -------\n step : float\n Computed step. 
Non-negative value.\n hits : ndarray of int with shape of x\n Each element indicates whether a corresponding variable reaches the\n bound:\n \n * 0 - the bound was not hit.\n * -1 - the lower bound was hit.\n * 1 - the upper bound was hit.\n \"\"\"\n non_zero = np.nonzero(s)\n s_non_zero = s[non_zero]\n steps = np.empty_like(x)\n steps.fill(np.inf)\n with np.errstate(over='ignore'):\n steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,\n (ub - x)[non_zero] / s_non_zero)\n min_step = np.min(steps)\n return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)\n\n\ndef find_active_constraints(x, lb, ub, rtol=1e-10):\n \"\"\"Determine which constraints are active in a given point.\n \n The threshold is computed using `rtol` and the absolute value of the\n closest bound.\n \n Returns\n -------\n active : ndarray of int with shape of x\n Each component shows whether the corresponding constraint is active:\n \n * 0 - a constraint is not active.\n * -1 - a lower bound is active.\n * 1 - a upper bound is active.\n \"\"\"\n active = np.zeros_like(x, dtype=int)\n\n if rtol == 0:\n active[x <= lb] = -1\n active[x >= ub] = 1\n return active\n\n lower_dist = x - lb\n upper_dist = ub - x\n\n lower_threshold = rtol * np.maximum(1, np.abs(lb))\n upper_threshold = rtol * np.maximum(1, np.abs(ub))\n\n lower_active = (np.isfinite(lb) &\n (lower_dist <= np.minimum(upper_dist, lower_threshold)))\n active[lower_active] = -1\n\n upper_active = (np.isfinite(ub) &\n (upper_dist <= np.minimum(lower_dist, upper_threshold)))\n active[upper_active] = 1\n\n return active\n\n\ndef make_strictly_feasible(x, lb, ub, rstep=1e-10):\n \"\"\"Shift a point to the interior of a feasible region.\n \n Each element of the returned vector is at least at a relative distance\n `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.\n \"\"\"\n x_new = x.copy()\n\n active = find_active_constraints(x, lb, ub, rstep)\n lower_mask = np.equal(active, -1)\n upper_mask = np.equal(active, 1)\n\n if rstep == 0:\n x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])\n x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])\n else:\n x_new[lower_mask] = (lb[lower_mask] +\n rstep * np.maximum(1, np.abs(lb[lower_mask])))\n x_new[upper_mask] = (ub[upper_mask] -\n rstep * np.maximum(1, np.abs(ub[upper_mask])))\n\n tight_bounds = (x_new < lb) | (x_new > ub)\n x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])\n\n return x_new\n\n \ndef CL_scaling_vector(x, g, lb, ub):\n \"\"\"Compute Coleman-Li scaling vector and its derivatives.\n \n Components of a vector v are defined as follows:\n ::\n | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf\n v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf\n | 1, otherwise\n \n According to this definition v[i] >= 0 for all i. It differs from the\n definition in paper [1]_ (eq. (2.2)), where the absolute value of v is\n used. Both definitions are equivalent down the line.\n Derivatives of v with respect to x take value 1, -1 or 0 depending on a\n case.\n \n Returns\n -------\n v : ndarray with shape of x\n Scaling vector.\n dv : ndarray with shape of x\n Derivatives of v[i] with respect to x[i], diagonal elements of v's\n Jacobian.\n \n References\n ----------\n .. [1] M.A. Branch, T.F. Coleman, and Y. Li, \"A Subspace, Interior,\n and Conjugate Gradient Method for Large-Scale Bound-Constrained\n Minimization Problems,\" SIAM Journal on Scientific Computing,\n Vol. 
21, Number 1, pp 1-23, 1999.\n \"\"\"\n v = np.ones_like(x)\n dv = np.zeros_like(x)\n\n mask = (g < 0) & np.isfinite(ub)\n v[mask] = ub[mask] - x[mask]\n dv[mask] = -1\n\n mask = (g > 0) & np.isfinite(lb)\n v[mask] = x[mask] - lb[mask]\n dv[mask] = 1\n\n return v, dv\n\n\ndef reflective_transformation(y, lb, ub):\n \"\"\"Compute reflective transformation and its gradient.\"\"\"\n if in_bounds(y, lb, ub):\n return y, np.ones_like(y)\n\n lb_finite = np.isfinite(lb)\n ub_finite = np.isfinite(ub)\n\n x = y.copy()\n g_negative = np.zeros_like(y, dtype=bool)\n\n mask = lb_finite & ~ub_finite\n x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])\n g_negative[mask] = y[mask] < lb[mask]\n\n mask = ~lb_finite & ub_finite\n x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])\n g_negative[mask] = y[mask] > ub[mask]\n\n mask = lb_finite & ub_finite\n d = ub - lb\n t = np.remainder(y[mask] - lb[mask], 2 * d[mask])\n x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)\n g_negative[mask] = t > d[mask]\n\n g = np.ones_like(y)\n g[g_negative] = -1\n\n return x, g\n\n\n# Functions to display algorithm's progress.\n\n\ndef print_header_nonlinear():\n print(\"{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}\"\n .format(\"Iteration\", \"Total nfev\", \"Cost\", \"Cost reduction\",\n \"Step norm\", \"Optimality\"))\n\n\ndef print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,\n step_norm, optimality):\n if cost_reduction is None:\n cost_reduction = \" \" * 15\n else:\n cost_reduction = \"{0:^15.2e}\".format(cost_reduction)\n\n if step_norm is None:\n step_norm = \" \" * 15\n else:\n step_norm = \"{0:^15.2e}\".format(step_norm)\n\n print(\"{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}\"\n .format(iteration, nfev, cost, cost_reduction,\n step_norm, optimality))\n\n\ndef print_header_linear():\n print(\"{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}\"\n .format(\"Iteration\", \"Cost\", \"Cost reduction\", \"Step norm\",\n \"Optimality\"))\n\n\ndef print_iteration_linear(iteration, cost, cost_reduction, step_norm,\n optimality):\n if cost_reduction is None:\n cost_reduction = \" \" * 15\n else:\n cost_reduction = \"{0:^15.2e}\".format(cost_reduction)\n\n if step_norm is None:\n step_norm = \" \" * 15\n else:\n step_norm = \"{0:^15.2e}\".format(step_norm)\n\n print(\"{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}\".format(\n iteration, cost, cost_reduction, step_norm, optimality))\n\n\n# Simple helper functions.\n\n\ndef compute_grad(J, f):\n \"\"\"Compute gradient of the least-squares cost function.\"\"\"\n if isinstance(J, LinearOperator):\n return J.rmatvec(f)\n else:\n return J.T.dot(f)\n\n\ndef compute_jac_scale(J, scale_inv_old=None):\n \"\"\"Compute variables scale based on the Jacobian matrix.\"\"\"\n if issparse(J):\n scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5\n else:\n scale_inv = np.sum(J**2, axis=0)**0.5\n\n if scale_inv_old is None:\n scale_inv[scale_inv == 0] = 1\n else:\n scale_inv = np.maximum(scale_inv, scale_inv_old)\n\n return 1 / scale_inv, scale_inv\n\n\ndef left_multiplied_operator(J, d):\n \"\"\"Return diag(d) J as LinearOperator.\"\"\"\n J = aslinearoperator(J)\n\n def matvec(x):\n return d * J.matvec(x)\n\n def matmat(X):\n return d * J.matmat(X)\n\n def rmatvec(x):\n return J.rmatvec(x.ravel() * d)\n\n return LinearOperator(J.shape, matvec=matvec, matmat=matmat,\n rmatvec=rmatvec)\n\n\ndef right_multiplied_operator(J, d):\n \"\"\"Return J diag(d) as LinearOperator.\"\"\"\n J = aslinearoperator(J)\n\n def matvec(x):\n return J.matvec(np.ravel(x) * d)\n\n def matmat(X):\n return J.matmat(X * d[:, 
np.newaxis])\n\n def rmatvec(x):\n return d * J.rmatvec(x)\n\n return LinearOperator(J.shape, matvec=matvec, matmat=matmat,\n rmatvec=rmatvec)\n\n\ndef regularized_lsq_operator(J, diag):\n \"\"\"Return a matrix arising in regularized least squares as LinearOperator.\n \n The matrix is\n [ J ]\n [ D ]\n where D is diagonal matrix with elements from `diag`.\n \"\"\"\n J = aslinearoperator(J)\n m, n = J.shape\n\n def matvec(x):\n return np.hstack((J.matvec(x), diag * x))\n\n def rmatvec(x):\n x1 = x[:m]\n x2 = x[m:]\n return J.rmatvec(x1) + diag * x2\n\n return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)\n\n\ndef right_multiply(J, d, copy=True):\n \"\"\"Compute J diag(d).\n \n If `copy` is False, `J` is modified in place (unless being LinearOperator).\n \"\"\"\n if copy and not isinstance(J, LinearOperator):\n J = J.copy()\n\n if issparse(J):\n J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe.\n elif isinstance(J, LinearOperator):\n J = right_multiplied_operator(J, d)\n else:\n J *= d\n\n return J\n\n\ndef left_multiply(J, d, copy=True):\n \"\"\"Compute diag(d) J.\n \n If `copy` is False, `J` is modified in place (unless being LinearOperator).\n \"\"\"\n if copy and not isinstance(J, LinearOperator):\n J = J.copy()\n\n if issparse(J):\n J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe.\n elif isinstance(J, LinearOperator):\n J = left_multiplied_operator(J, d)\n else:\n J *= d[:, np.newaxis]\n\n return J\n\n\ndef check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):\n \"\"\"Check termination condition for nonlinear least squares.\"\"\"\n ftol_satisfied = dF < ftol * F and ratio > 0.25\n xtol_satisfied = dx_norm < xtol * (xtol + x_norm)\n\n if ftol_satisfied and xtol_satisfied:\n return 4\n elif ftol_satisfied:\n return 2\n elif xtol_satisfied:\n return 3\n else:\n return None\n\n\ndef scale_for_robust_loss_function(J, f, rho):\n \"\"\"Scale Jacobian and residuals for a robust loss function.\n \n Arrays are modified in place.\n \"\"\"\n J_scale = rho[1] + 2 * rho[2] * f**2\n J_scale[J_scale < EPS] = EPS\n J_scale **= 0.5\n\n f *= rho[1] / J_scale\n\n return left_multiply(J, J_scale, copy=False), f\n", "path": "scipy/optimize/_lsq/common.py" } ]
[ { "content": "\"\"\"Functions used by least-squares algorithms.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom math import copysign\n\nimport numpy as np\nfrom numpy.linalg import norm\n\nfrom scipy.linalg import cho_factor, cho_solve, LinAlgError\nfrom scipy.sparse import issparse\nfrom scipy.sparse.linalg import LinearOperator, aslinearoperator\n\n\nEPS = np.finfo(float).eps\n\n\n# Functions related to a trust-region problem.\n\n\ndef intersect_trust_region(x, s, Delta):\n \"\"\"Find the intersection of a line with the boundary of a trust region.\n \n This function solves the quadratic equation with respect to t\n ||(x + s*t)||**2 = Delta**2.\n \n Returns\n -------\n t_neg, t_pos : tuple of float\n Negative and positive roots.\n \n Raises\n ------\n ValueError\n If `s` is zero or `x` is not within the trust region.\n \"\"\"\n a = np.dot(s, s)\n if a == 0:\n raise ValueError(\"`s` is zero.\")\n\n b = np.dot(x, s)\n\n c = np.dot(x, x) - Delta**2\n if c > 0:\n raise ValueError(\"`x` is not within the trust region.\")\n\n d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.\n\n # Computations below avoid loss of significance, see \"Numerical Recipes\".\n q = -(b + copysign(d, b))\n t1 = q / a\n t2 = c / q\n\n if t1 < t2:\n return t1, t2\n else:\n return t2, t1\n\n\ndef solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,\n rtol=0.01, max_iter=10):\n \"\"\"Solve a trust-region problem arising in least-squares minimization.\n \n This function implements a method described by J. J. More [1]_ and used\n in MINPACK, but it relies on a single SVD of Jacobian instead of series\n of Cholesky decompositions. Before running this function, compute:\n ``U, s, VT = svd(J, full_matrices=False)``.\n \n Parameters\n ----------\n n : int\n Number of variables.\n m : int\n Number of residuals.\n uf : ndarray\n Computed as U.T.dot(f).\n s : ndarray\n Singular values of J.\n V : ndarray\n Transpose of VT.\n Delta : float\n Radius of a trust region.\n initial_alpha : float, optional\n Initial guess for alpha, which might be available from a previous\n iteration. If None, determined automatically.\n rtol : float, optional\n Stopping tolerance for the root-finding procedure. Namely, the\n solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.\n max_iter : int, optional\n Maximum allowed number of iterations for the root-finding procedure.\n \n Returns\n -------\n p : ndarray, shape (n,)\n Found solution of a trust-region problem.\n alpha : float\n Positive value such that (J.T*J + alpha*I)*p = -J.T*f.\n Sometimes called Levenberg-Marquardt parameter.\n n_iter : int\n Number of iterations made by root-finding procedure. Zero means\n that Gauss-Newton step was selected as the solution.\n \n References\n ----------\n .. [1] More, J. J., \"The Levenberg-Marquardt Algorithm: Implementation\n and Theory,\" Numerical Analysis, ed. G. A. Watson, Lecture Notes\n in Mathematics 630, Springer Verlag, pp. 105-116, 1977.\n \"\"\"\n def phi_and_derivative(alpha, suf, s, Delta):\n \"\"\"Function of which to find zero.\n \n It is defined as \"norm of regularized (by alpha) least-squares\n solution minus `Delta`\". 
Refer to [1]_.\n \"\"\"\n denom = s**2 + alpha\n p_norm = norm(suf / denom)\n phi = p_norm - Delta\n phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm\n return phi, phi_prime\n\n suf = s * uf\n\n # Check if J has full rank and try Gauss-Newton step.\n if m >= n:\n threshold = EPS * m * s[0]\n full_rank = s[-1] > threshold\n else:\n full_rank = False\n\n if full_rank:\n p = -V.dot(uf / s)\n if norm(p) <= Delta:\n return p, 0.0, 0\n\n alpha_upper = norm(suf) / Delta\n\n if full_rank:\n phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)\n alpha_lower = -phi / phi_prime\n else:\n alpha_lower = 0.0\n\n if initial_alpha is None or not full_rank and initial_alpha == 0:\n alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)\n else:\n alpha = initial_alpha\n\n for it in range(max_iter):\n if alpha < alpha_lower or alpha > alpha_upper:\n alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)\n\n phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)\n\n if phi < 0:\n alpha_upper = alpha\n\n ratio = phi / phi_prime\n alpha_lower = max(alpha_lower, alpha - ratio)\n alpha -= (phi + Delta) * ratio / Delta\n\n if np.abs(phi) < rtol * Delta:\n break\n\n p = -V.dot(suf / (s**2 + alpha))\n\n # Make the norm of p equal to Delta, p is changed only slightly during\n # this. It is done to prevent p lie outside the trust region (which can\n # cause problems later).\n p *= Delta / norm(p)\n\n return p, alpha, it + 1\n\n\ndef solve_trust_region_2d(B, g, Delta):\n \"\"\"Solve a general trust-region problem in 2 dimensions.\n \n The problem is reformulated as a 4-th order algebraic equation,\n the solution of which is found by numpy.roots.\n \n Parameters\n ----------\n B : ndarray, shape (2, 2)\n Symmetric matrix, defines a quadratic term of the function.\n g : ndarray, shape (2,)\n Defines a linear term of the function.\n Delta : float\n Radius of a trust region.\n \n Returns\n -------\n p : ndarray, shape (2,)\n Found solution.\n newton_step : bool\n Whether the returned solution is the Newton step which lies within\n the trust region.\n \"\"\"\n try:\n R, lower = cho_factor(B)\n p = -cho_solve((R, lower), g)\n if np.dot(p, p) <= Delta**2:\n return p, True\n except LinAlgError:\n pass\n\n a = B[0, 0] * Delta**2\n b = B[0, 1] * Delta**2\n c = B[1, 1] * Delta**2\n\n d = g[0] * Delta\n f = g[1] * Delta\n\n coeffs = np.array(\n [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])\n t = np.roots(coeffs) # Can handle leading zeros.\n t = np.real(t[np.isreal(t)])\n\n p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))\n value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)\n i = np.argmin(value)\n p = p[:, i]\n\n return p, False\n\n\ndef update_tr_radius(Delta, actual_reduction, predicted_reduction,\n step_norm, bound_hit):\n \"\"\"Update the radius of a trust region based on the cost reduction.\n\n Returns\n -------\n Delta : float\n New radius.\n ratio : float\n Ratio between actual and predicted reductions. 
Zero if predicted\n reduction is zero.\n \"\"\"\n if predicted_reduction > 0:\n ratio = actual_reduction / predicted_reduction\n else:\n ratio = 0\n\n if ratio < 0.25:\n Delta = 0.25 * step_norm\n elif ratio > 0.75 and bound_hit:\n Delta *= 2.0\n\n return Delta, ratio\n\n\n# Construction and minimization of quadratic functions.\n\n\ndef build_quadratic_1d(J, g, s, diag=None, s0=None):\n \"\"\"Parameterize a multivariate quadratic function along a line.\n \n The resulting univariate quadratic function is given as follows:\n ::\n f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +\n g.T * (s0 + s*t)\n \n Parameters\n ----------\n J : ndarray, sparse matrix or LinearOperator shape (m, n)\n Jacobian matrix, affects the quadratic term.\n g : ndarray, shape (n,)\n Gradient, defines the linear term.\n s : ndarray, shape (n,)\n Direction vector of a line.\n diag : None or ndarray with shape (n,), optional\n Addition diagonal part, affects the quadratic term.\n If None, assumed to be 0.\n s0 : None or ndarray with shape (n,), optional\n Initial point. If None, assumed to be 0.\n \n Returns\n -------\n a : float\n Coefficient for t**2.\n b : float\n Coefficient for t.\n c : float\n Free term. Returned only if `s0` is provided.\n \"\"\"\n v = J.dot(s)\n a = np.dot(v, v)\n if diag is not None:\n a += np.dot(s * diag, s)\n a *= 0.5\n\n b = np.dot(g, s)\n\n if s0 is not None:\n u = J.dot(s0)\n b += np.dot(u, v)\n c = 0.5 * np.dot(u, u) + np.dot(g, s0)\n if diag is not None:\n b += np.dot(s0 * diag, s)\n c += 0.5 * np.dot(s0 * diag, s0)\n return a, b, c\n else:\n return a, b\n\n\ndef minimize_quadratic_1d(a, b, lb, ub, c=0):\n \"\"\"Minimize a 1-d quadratic function subject to bounds.\n \n The free term `c` is 0 by default. Bounds must be finite.\n \n Returns\n -------\n t : float\n Minimum point.\n y : float\n Minimum value.\n \"\"\"\n t = [lb, ub]\n if a != 0:\n extremum = -0.5 * b / a\n if lb < extremum < ub:\n t.append(extremum)\n t = np.asarray(t)\n y = a * t**2 + b * t + c\n min_index = np.argmin(y)\n return t[min_index], y[min_index]\n\n\ndef evaluate_quadratic(J, g, s, diag=None):\n \"\"\"Compute values of a quadratic function arising in least squares.\n \n The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.\n \n Parameters\n ----------\n J : ndarray, sparse matrix or LinearOperator, shape (m, n)\n Jacobian matrix, affects the quadratic term.\n g : ndarray, shape (n,)\n Gradient, defines the linear term.\n s : ndarray, shape (k, n) or (n,)\n Array containing steps as rows.\n diag : ndarray, shape (n,), optional\n Addition diagonal part, affects the quadratic term.\n If None, assumed to be 0.\n \n Returns\n -------\n values : ndarray with shape (k,) or float\n Values of the function. If `s` was 2-dimensional then ndarray is\n returned, otherwise float is returned.\n \"\"\"\n if s.ndim == 1:\n Js = J.dot(s)\n q = np.dot(Js, Js)\n if diag is not None:\n q += np.dot(s * diag, s)\n else:\n Js = J.dot(s.T)\n q = np.sum(Js**2, axis=0)\n if diag is not None:\n q += np.sum(diag * s**2, axis=1)\n\n l = np.dot(s, g)\n\n return 0.5 * q + l\n\n\n# Utility functions to work with bound constraints.\n\n\ndef in_bounds(x, lb, ub):\n \"\"\"Check if a point lies within bounds.\"\"\"\n return np.all((x >= lb) & (x <= ub))\n\n\ndef step_size_to_bound(x, s, lb, ub):\n \"\"\"Compute a min_step size required to reach a bound.\n \n The function computes a positive scalar t, such that x + s * t is on\n the bound.\n \n Returns\n -------\n step : float\n Computed step. 
Non-negative value.\n hits : ndarray of int with shape of x\n Each element indicates whether a corresponding variable reaches the\n bound:\n \n * 0 - the bound was not hit.\n * -1 - the lower bound was hit.\n * 1 - the upper bound was hit.\n \"\"\"\n non_zero = np.nonzero(s)\n s_non_zero = s[non_zero]\n steps = np.empty_like(x)\n steps.fill(np.inf)\n with np.errstate(over='ignore'):\n steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,\n (ub - x)[non_zero] / s_non_zero)\n min_step = np.min(steps)\n return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)\n\n\ndef find_active_constraints(x, lb, ub, rtol=1e-10):\n \"\"\"Determine which constraints are active in a given point.\n \n The threshold is computed using `rtol` and the absolute value of the\n closest bound.\n \n Returns\n -------\n active : ndarray of int with shape of x\n Each component shows whether the corresponding constraint is active:\n \n * 0 - a constraint is not active.\n * -1 - a lower bound is active.\n * 1 - a upper bound is active.\n \"\"\"\n active = np.zeros_like(x, dtype=int)\n\n if rtol == 0:\n active[x <= lb] = -1\n active[x >= ub] = 1\n return active\n\n lower_dist = x - lb\n upper_dist = ub - x\n\n lower_threshold = rtol * np.maximum(1, np.abs(lb))\n upper_threshold = rtol * np.maximum(1, np.abs(ub))\n\n lower_active = (np.isfinite(lb) &\n (lower_dist <= np.minimum(upper_dist, lower_threshold)))\n active[lower_active] = -1\n\n upper_active = (np.isfinite(ub) &\n (upper_dist <= np.minimum(lower_dist, upper_threshold)))\n active[upper_active] = 1\n\n return active\n\n\ndef make_strictly_feasible(x, lb, ub, rstep=1e-10):\n \"\"\"Shift a point to the interior of a feasible region.\n \n Each element of the returned vector is at least at a relative distance\n `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.\n \"\"\"\n x_new = x.copy()\n\n active = find_active_constraints(x, lb, ub, rstep)\n lower_mask = np.equal(active, -1)\n upper_mask = np.equal(active, 1)\n\n if rstep == 0:\n x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])\n x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])\n else:\n x_new[lower_mask] = (lb[lower_mask] +\n rstep * np.maximum(1, np.abs(lb[lower_mask])))\n x_new[upper_mask] = (ub[upper_mask] -\n rstep * np.maximum(1, np.abs(ub[upper_mask])))\n\n tight_bounds = (x_new < lb) | (x_new > ub)\n x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])\n\n return x_new\n\n \ndef CL_scaling_vector(x, g, lb, ub):\n \"\"\"Compute Coleman-Li scaling vector and its derivatives.\n \n Components of a vector v are defined as follows:\n ::\n | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf\n v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf\n | 1, otherwise\n \n According to this definition v[i] >= 0 for all i. It differs from the\n definition in paper [1]_ (eq. (2.2)), where the absolute value of v is\n used. Both definitions are equivalent down the line.\n Derivatives of v with respect to x take value 1, -1 or 0 depending on a\n case.\n \n Returns\n -------\n v : ndarray with shape of x\n Scaling vector.\n dv : ndarray with shape of x\n Derivatives of v[i] with respect to x[i], diagonal elements of v's\n Jacobian.\n \n References\n ----------\n .. [1] M.A. Branch, T.F. Coleman, and Y. Li, \"A Subspace, Interior,\n and Conjugate Gradient Method for Large-Scale Bound-Constrained\n Minimization Problems,\" SIAM Journal on Scientific Computing,\n Vol. 
21, Number 1, pp 1-23, 1999.\n \"\"\"\n v = np.ones_like(x)\n dv = np.zeros_like(x)\n\n mask = (g < 0) & np.isfinite(ub)\n v[mask] = ub[mask] - x[mask]\n dv[mask] = -1\n\n mask = (g > 0) & np.isfinite(lb)\n v[mask] = x[mask] - lb[mask]\n dv[mask] = 1\n\n return v, dv\n\n\ndef reflective_transformation(y, lb, ub):\n \"\"\"Compute reflective transformation and its gradient.\"\"\"\n if in_bounds(y, lb, ub):\n return y, np.ones_like(y)\n\n lb_finite = np.isfinite(lb)\n ub_finite = np.isfinite(ub)\n\n x = y.copy()\n g_negative = np.zeros_like(y, dtype=bool)\n\n mask = lb_finite & ~ub_finite\n x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])\n g_negative[mask] = y[mask] < lb[mask]\n\n mask = ~lb_finite & ub_finite\n x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])\n g_negative[mask] = y[mask] > ub[mask]\n\n mask = lb_finite & ub_finite\n d = ub - lb\n t = np.remainder(y[mask] - lb[mask], 2 * d[mask])\n x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)\n g_negative[mask] = t > d[mask]\n\n g = np.ones_like(y)\n g[g_negative] = -1\n\n return x, g\n\n\n# Functions to display algorithm's progress.\n\n\ndef print_header_nonlinear():\n print(\"{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}\"\n .format(\"Iteration\", \"Total nfev\", \"Cost\", \"Cost reduction\",\n \"Step norm\", \"Optimality\"))\n\n\ndef print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,\n step_norm, optimality):\n if cost_reduction is None:\n cost_reduction = \" \" * 15\n else:\n cost_reduction = \"{0:^15.2e}\".format(cost_reduction)\n\n if step_norm is None:\n step_norm = \" \" * 15\n else:\n step_norm = \"{0:^15.2e}\".format(step_norm)\n\n print(\"{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}\"\n .format(iteration, nfev, cost, cost_reduction,\n step_norm, optimality))\n\n\ndef print_header_linear():\n print(\"{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}\"\n .format(\"Iteration\", \"Cost\", \"Cost reduction\", \"Step norm\",\n \"Optimality\"))\n\n\ndef print_iteration_linear(iteration, cost, cost_reduction, step_norm,\n optimality):\n if cost_reduction is None:\n cost_reduction = \" \" * 15\n else:\n cost_reduction = \"{0:^15.2e}\".format(cost_reduction)\n\n if step_norm is None:\n step_norm = \" \" * 15\n else:\n step_norm = \"{0:^15.2e}\".format(step_norm)\n\n print(\"{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}\".format(\n iteration, cost, cost_reduction, step_norm, optimality))\n\n\n# Simple helper functions.\n\n\ndef compute_grad(J, f):\n \"\"\"Compute gradient of the least-squares cost function.\"\"\"\n if isinstance(J, LinearOperator):\n return J.rmatvec(f)\n else:\n return J.T.dot(f)\n\n\ndef compute_jac_scale(J, scale_inv_old=None):\n \"\"\"Compute variables scale based on the Jacobian matrix.\"\"\"\n if issparse(J):\n scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5\n else:\n scale_inv = np.sum(J**2, axis=0)**0.5\n\n if scale_inv_old is None:\n scale_inv[scale_inv == 0] = 1\n else:\n scale_inv = np.maximum(scale_inv, scale_inv_old)\n\n return 1 / scale_inv, scale_inv\n\n\ndef left_multiplied_operator(J, d):\n \"\"\"Return diag(d) J as LinearOperator.\"\"\"\n J = aslinearoperator(J)\n\n def matvec(x):\n return d * J.matvec(x)\n\n def matmat(X):\n return d[:, np.newaxis] * J.matmat(X)\n\n def rmatvec(x):\n return J.rmatvec(x.ravel() * d)\n\n return LinearOperator(J.shape, matvec=matvec, matmat=matmat,\n rmatvec=rmatvec)\n\n\ndef right_multiplied_operator(J, d):\n \"\"\"Return J diag(d) as LinearOperator.\"\"\"\n J = aslinearoperator(J)\n\n def matvec(x):\n return J.matvec(np.ravel(x) * d)\n\n def matmat(X):\n return 
J.matmat(X * d[:, np.newaxis])\n\n def rmatvec(x):\n return d * J.rmatvec(x)\n\n return LinearOperator(J.shape, matvec=matvec, matmat=matmat,\n rmatvec=rmatvec)\n\n\ndef regularized_lsq_operator(J, diag):\n \"\"\"Return a matrix arising in regularized least squares as LinearOperator.\n \n The matrix is\n [ J ]\n [ D ]\n where D is diagonal matrix with elements from `diag`.\n \"\"\"\n J = aslinearoperator(J)\n m, n = J.shape\n\n def matvec(x):\n return np.hstack((J.matvec(x), diag * x))\n\n def rmatvec(x):\n x1 = x[:m]\n x2 = x[m:]\n return J.rmatvec(x1) + diag * x2\n\n return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)\n\n\ndef right_multiply(J, d, copy=True):\n \"\"\"Compute J diag(d).\n \n If `copy` is False, `J` is modified in place (unless being LinearOperator).\n \"\"\"\n if copy and not isinstance(J, LinearOperator):\n J = J.copy()\n\n if issparse(J):\n J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe.\n elif isinstance(J, LinearOperator):\n J = right_multiplied_operator(J, d)\n else:\n J *= d\n\n return J\n\n\ndef left_multiply(J, d, copy=True):\n \"\"\"Compute diag(d) J.\n \n If `copy` is False, `J` is modified in place (unless being LinearOperator).\n \"\"\"\n if copy and not isinstance(J, LinearOperator):\n J = J.copy()\n\n if issparse(J):\n J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe.\n elif isinstance(J, LinearOperator):\n J = left_multiplied_operator(J, d)\n else:\n J *= d[:, np.newaxis]\n\n return J\n\n\ndef check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):\n \"\"\"Check termination condition for nonlinear least squares.\"\"\"\n ftol_satisfied = dF < ftol * F and ratio > 0.25\n xtol_satisfied = dx_norm < xtol * (xtol + x_norm)\n\n if ftol_satisfied and xtol_satisfied:\n return 4\n elif ftol_satisfied:\n return 2\n elif xtol_satisfied:\n return 3\n else:\n return None\n\n\ndef scale_for_robust_loss_function(J, f, rho):\n \"\"\"Scale Jacobian and residuals for a robust loss function.\n \n Arrays are modified in place.\n \"\"\"\n J_scale = rho[1] + 2 * rho[2] * f**2\n J_scale[J_scale < EPS] = EPS\n J_scale **= 0.5\n\n f *= rho[1] / J_scale\n\n return left_multiply(J, J_scale, copy=False), f\n", "path": "scipy/optimize/_lsq/common.py" } ]
diff --git a/scipy/optimize/_lsq/common.py b/scipy/optimize/_lsq/common.py index 61a376e12d6d..2327d61e4b2e 100644 --- a/scipy/optimize/_lsq/common.py +++ b/scipy/optimize/_lsq/common.py @@ -622,7 +622,7 @@ def matvec(x): return d * J.matvec(x) def matmat(X): - return d * J.matmat(X) + return d[:, np.newaxis] * J.matmat(X) def rmatvec(x): return J.rmatvec(x.ravel() * d) diff --git a/scipy/optimize/tests/test_lsq_common.py b/scipy/optimize/tests/test_lsq_common.py index 42d866c9b8e7..bda69c9ec99a 100644 --- a/scipy/optimize/tests/test_lsq_common.py +++ b/scipy/optimize/tests/test_lsq_common.py @@ -4,10 +4,12 @@ from pytest import raises as assert_raises import numpy as np +from scipy.sparse.linalg import LinearOperator from scipy.optimize._lsq.common import ( step_size_to_bound, find_active_constraints, make_strictly_feasible, CL_scaling_vector, intersect_trust_region, build_quadratic_1d, - minimize_quadratic_1d, evaluate_quadratic, reflective_transformation) + minimize_quadratic_1d, evaluate_quadratic, reflective_transformation, + left_multiplied_operator, right_multiplied_operator) class TestBounds(object): @@ -248,3 +250,26 @@ def test_reflective_transformation(): assert_equal(x, [0, 10]) assert_equal(g, [-1, 1]) + +def test_linear_operators(): + A = np.arange(6).reshape((3, 2)) + + d_left = np.array([-1, 2, 5]) + DA = np.diag(d_left).dot(A) + J_left = left_multiplied_operator(A, d_left) + + d_right = np.array([5, 10]) + AD = A.dot(np.diag(d_right)) + J_right = right_multiplied_operator(A, d_right) + + x = np.array([-2, 3]) + X = -2 * np.arange(2, 8).reshape((2, 3)) + xt = np.array([0, -2, 15]) + + assert_allclose(DA.dot(x), J_left.matvec(x)) + assert_allclose(DA.dot(X), J_left.matmat(X)) + assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt)) + + assert_allclose(AD.dot(x), J_right.matvec(x)) + assert_allclose(AD.dot(X), J_right.matmat(X)) + assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt))
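The one-line production change in this diff is a NumPy broadcasting fix: `left_multiplied_operator` is meant to behave like `diag(d) @ J`, so in `matmat` the scaling vector `d` has to multiply rows of the result, not columns. A minimal standalone sketch of the difference (plain NumPy, not the patched SciPy module itself):

```python
import numpy as np

d = np.array([-1.0, 2.0, 5.0])       # diagonal entries, shape (m,)
X = np.arange(6.0).reshape(3, 2)     # operand, shape (m, k) with m != k

expected = np.diag(d) @ X            # diag(d) X scales the rows of X

try:
    wrong = d * X                    # old code: (m,) broadcasts against the last axis
except ValueError as exc:
    print("d * X fails when m != k:", exc)

fixed = d[:, np.newaxis] * X         # patched code: column vector scales rows
assert np.allclose(fixed, expected)
```

For shapes where the broadcast happens to succeed (for example a square `J.matmat(X)` result), the old expression ran without error while silently scaling columns instead of rows, which is the case the new `test_linear_operators` regression test exercises against dense references.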
cookiecutter__cookiecutter-1578
prompt.read_user_dict() is broken due to click upgrade from 7.1.2 to 8.0.0 * Cookiecutter version: 1.7.3 * Template project url: - * Python version: 3.9.5 * Operating System: macOS Catalina 10.15.7 ### Description: Apparently, there is a breaking change in `click==8.0.0` affecting dictionary values in cookiecutter.json cookiecutter.json example: ```json { "project_name": "", "project_policy": {"project_policy_example": "yes"} } ``` ``` % python -m cookiecutter ../Projects/project-configs devplatform_project_name [infra-dev]: project_name []: t project_policy [default]: Error: Unable to decode to JSON. ``` Looking closer at the cookiecutter.promt, I can see that in `read_user_dict()`, click passes `user_value='default'` to `process_json()`, instead of passing an actual default value from the cookiecutter.json as it was in `click 7.1.2`. Link to the `process_json()` code: https://github.com/cookiecutter/cookiecutter/blob/master/cookiecutter/prompt.py#L81 ![image](https://user-images.githubusercontent.com/41152092/118825592-7a01bd80-b8c3-11eb-96fb-1f7bba264783.png) As far as I can suppose, that issue could have been introduced by this PR https://github.com/pallets/click/pull/1517/ ### Quick local fix Install click first and specify version older than 8.0.0 ``` pip install click==7.1.2 pip install cookiecutter ``` ### Quick fix for cookiecutter library in `setup.py` replace 'click>=7.0' with `'click>=7,<8.0.0'` ### What I've run: ```shell % python3.9 -m venv test39 % source test39/bin/activate % python -V Python 3.9.5 % python -m pip install click==7.1.2 Collecting click==7.1.2 Using cached click-7.1.2-py2.py3-none-any.whl (82 kB) Installing collected packages: click Successfully installed click-7.1.2 (test39) ro.solyanik@macbook-ro Environments % python -m pip install cookiecutter Collecting cookiecutter Using cached cookiecutter-1.7.3-py2.py3-none-any.whl (34 kB) Collecting six>=1.10 ................................................ Installing collected packages: six, python-dateutil, MarkupSafe, urllib3, text-unidecode, Jinja2, idna, chardet, certifi, arrow, requests, python-slugify, poyo, jinja2-time, binaryornot, cookiecutter Successfully installed Jinja2-3.0.1 MarkupSafe-2.0.1 arrow-1.1.0 binaryornot-0.4.4 certifi-2020.12.5 chardet-4.0.0 cookiecutter-1.7.3 idna-2.10 jinja2-time-0.2.0 poyo-0.5.0 python-dateutil-2.8.1 python-slugify-5.0.2 requests-2.25.1 six-1.16.0 text-unidecode-1.3 urllib3-1.26.4 % python -m cookiecutter ../Projects/project-configs project_name []: t project_policy [default]: % ls t Makefile README.md t tests % rm -rf t % python -m pip install click==8.0.0 Collecting click==8.0.0 Using cached click-8.0.0-py3-none-any.whl (96 kB) Installing collected packages: click Attempting uninstall: click Found existing installation: click 7.1.2 Uninstalling click-7.1.2: Successfully uninstalled click-7.1.2 Successfully installed click-8.0.0 % python -m cookiecutter ../Projects/project-configs devplatform_project_name [infra-dev]: project_name []: t project_policy [default]: Error: Unable to decode to JSON. project_policy [default]: Error: Unable to decode to JSON. ```
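A minimal sketch of the behavior change described in this report, assuming `process_json` is essentially a `json.loads` wrapper that raises `click.UsageError` on bad input (the real function lives in `cookiecutter/prompt.py`); the prompt call roughly mirrors how `read_user_dict()` uses the literal string `'default'` as the displayed default:

```python
import json
import click

def process_json(user_value):
    # Simplified stand-in for cookiecutter.prompt.process_json.
    try:
        return json.loads(user_value)
    except Exception:
        raise click.UsageError('Unable to decode to JSON.')

# click 7.1.2: pressing Enter returns 'default' without calling value_proc,
#              and read_user_dict() then substitutes the real dict itself.
# click 8.0.0: the default is also routed through value_proc, so
#              process_json('default') raises "Unable to decode to JSON."
value = click.prompt('project_policy', default='default',
                     type=click.STRING, value_proc=process_json)
```

The patch in this record works around the change rather than adapting to it: setup.py pins `click>=7.0,<8.0.0`, and a regression test asserts that `process_json` is never called when the default is accepted.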
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration.\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.3\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0',\n 'poyo>=0.5.0',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'six>=1.10',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={':python_version<\"3.3\"': ['whichcraft>=0.4.0']},\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration.\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.3\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<8.0.0',\n 'poyo>=0.5.0',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'six>=1.10',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={':python_version<\"3.3\"': ['whichcraft>=0.4.0']},\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 034ad1c30..601fc2305 100644 --- a/setup.py +++ b/setup.py @@ -27,7 +27,7 @@ requirements = [ 'binaryornot>=0.4.4', 'Jinja2>=2.7,<4.0.0', - 'click>=7.0', + 'click>=7.0,<8.0.0', 'poyo>=0.5.0', 'jinja2-time>=0.2.0', 'python-slugify>=4.0.0', diff --git a/tests/test_read_user_dict.py b/tests/test_read_user_dict.py index baa230470..95054b4c2 100644 --- a/tests/test_read_user_dict.py +++ b/tests/test_read_user_dict.py @@ -102,6 +102,17 @@ def test_should_call_prompt_with_process_json(mocker): ) +def test_should_not_call_process_json_default_value(mocker, monkeypatch): + """Make sure that `process_json` is not called when using default value.""" + mock_process_json = mocker.patch('cookiecutter.prompt.process_json', autospec=True) + + runner = click.testing.CliRunner() + with runner.isolation(input="\n"): + read_user_dict('name', {'project_slug': 'pytest-plugin'}) + + mock_process_json.assert_not_called() + + def test_read_user_dict_default_value(mocker): """Make sure that `read_user_dict` returns the default value.
microsoft__ptvsd-259
Debugging of modules using -m is broken Used to be able to debug Flask & other code using the `-m` flag.
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n# import the wrapper first, so that it gets a chance\n# to detour pydevd socket functionality.\nimport ptvsd.wrapper\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a4\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n address = (None, port_num)\n if run_as == 'module':\n _run_module(address, filename)\n else:\n _run_file(address, filename)\n\n\ndef _run_module(address, modname):\n filename = modname + ':'\n argv = _run_argv(address, filename)\n argv.append('--module')\n _run(argv)\n\n\ndef _run_file(address, filename):\n argv = _run_argv(address, filename)\n _run(argv)\n\n\ndef _run_argv(address, filename):\n host, port = address\n if host is None:\n host = '127.0.0.1'\n return [\n '--port', str(port),\n '--client', host,\n '--file', filename,\n ]\n\n\ndef _run(argv):\n import pydevd\n sys.argv[1:0] = argv\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code)\n raise\n", "path": "ptvsd/debugger.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n# import the wrapper first, so that it gets a chance\n# to detour pydevd socket functionality.\nimport ptvsd.wrapper\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a3\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n address = (None, port_num)\n if run_as == 'module':\n _run_module(address, filename)\n else:\n _run_file(address, filename)\n\n\ndef _run_module(address, modname):\n filename = modname + ':'\n argv = _run_argv(address, filename)\n argv.insert(argv.index('--file'), '--module')\n _run(argv)\n\n\ndef _run_file(address, filename):\n argv = _run_argv(address, filename)\n _run(argv)\n\n\ndef _run_argv(address, filename):\n host, port = address\n if host is None:\n host = '127.0.0.1'\n return [\n '--port', str(port),\n '--client', host,\n '--file', filename,\n ]\n\n\ndef _run(argv):\n import pydevd\n sys.argv[1:0] = argv\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code)\n raise\n", "path": "ptvsd/debugger.py" } ]
diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py index 2159715c5..d8afbde9a 100644 --- a/ptvsd/debugger.py +++ b/ptvsd/debugger.py @@ -27,7 +27,7 @@ def debug(filename, port_num, debug_id, debug_options, run_as): def _run_module(address, modname): filename = modname + ':' argv = _run_argv(address, filename) - argv.append('--module') + argv.insert(argv.index('--file'), '--module') _run(argv)
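The patch only changes where `'--module'` lands in the argument list handed to pydevd. A small sketch reconstructing both orderings from the code above, with an illustrative module name and port that are not taken from the record:

```python
def run_argv(port, host, filename):
    # Mirrors ptvsd.debugger._run_argv.
    return ['--port', str(port), '--client', host, '--file', filename]

argv = run_argv(5678, '127.0.0.1', 'flask' + ':')

broken = argv + ['--module']                       # old: flag trails the '--file' target
fixed = list(argv)
fixed.insert(fixed.index('--file'), '--module')    # patched: flag precedes '--file'

print(broken)  # ['--port', '5678', '--client', '127.0.0.1', '--file', 'flask:', '--module']
print(fixed)   # ['--port', '5678', '--client', '127.0.0.1', '--module', '--file', 'flask:']
```

pydevd appears to treat everything after the `--file` target as arguments for the program being debugged, so a trailing `--module` flag was never seen and module targets such as Flask were launched as plain scripts.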
RedHatInsights__insights-core-2985
DatasourceProvider reads file-content in "bytes" format and causing parsing to fail Original Exception raised by the parser ``` File "/work/insights/insights-core/insights/core/__init__.py", line 95, in _handle_content self.parse_content(context.content) File "/work/insights/insights-core/insights/parsers/ld_library_path.py", line 56, in parse_content user, _, raw = [s.strip() for s in line.partition(' ')] AttributeError: 'int' object has no attribute 'partition' ``` Collected `meta_data`: ```json # cat /tmp/insights-vm37-39.gsslab.pek2.redhat.com-20210308120914/meta_data/insights.specs.Specs.ld_library_path_of_user.json | python3 -m json.tool { "exec_time": 0.0009329319000244141, "ser_time": 0.0006611347198486328, "errors": [], "name": "insights.specs.Specs.ld_library_path_of_user", "results": { "object": { "relative_path": "insights_commands/echo_user_LD_LIBRARY_PATH" }, "type": "insights.core.spec_factory.DatasourceProvider" } } ``` The content: ``` # insights-inspect insights.specs.Specs.ld_library_path_of_user /tmp/insights-vm37-39.gsslab.pek2.redhat.com-20210308120914 In [1]: type(ld_library_path_of_user) Out[1]: insights.core.spec_factory.SerializedRawOutputProvider In [2]: type(ld_library_path_of_user.content) Out[2]: bytes In [3]: ld_library_path_of_user.content Out[3]: b'rh1adm /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib\nsr1adm /usr/sap/SR1/HDB02/exe/krb5/lib/krb5/plugins/preauth:/usr/sap/SR1/HDB02/exe/krb5/lib:/usr/sap/SR1/HDB02/exe:/usr/sap/SR1/HDB02/exe/Python/lib:/usr/sap/SR1/HDB02/exe/filter:/usr/sap/SR1/HDB02/exe/dat_bin_dir:/usr/sap/SR1/HDB02/exe/plugins/afl:/usr/sap/SR1/HDB02/exe/plugins/lcapps:/usr/sap/SR1/HDB02/exe/plugins/repository:/usr/sap/SR1/HDB02/exe/plugins/epmmds:/usr/sap/SR1/SYS/global/hdb/federation:/usr/sap/SR1/SYS/global/hdb/plugins/3rd_party_libs\nrh2adm /usr/sap/RH2/SYS/exe/run:/usr/sap/RH2/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH2/lib' ``` I'm unsure if it's OK to modify the [load](https://github.com/RedHatInsights/insights-core/blob/6dcb0a35b9bf8089a860fbb8b7364029076b9b72/insights/core/spec_factory.py#L202-L205) method to read the file to a list directly: ```python self.loaded = True with open(self.path, 'r') as f: return f.readlines() ``` @csams , @bfahr - would you please have a look?
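The traceback is the standard Python 3 bytes-iteration pitfall: iterating over a `bytes` object yields integers, so `line.partition(' ')` in the parser fails before any real parsing happens. A small sketch of the failure mode and of a decode-and-split approach along the lines the reporter suggests (the sample content is abbreviated from the issue; this is not the actual insights-core fix):

```python
content = b'rh1adm /usr/sap/RH1/SYS/exe/run\nsr1adm /usr/sap/SR1/HDB02/exe'

first = next(iter(content))
print(type(first), first)   # <class 'int'> 114 -- hence "'int' object has no attribute 'partition'"

# Handing parsers a list of text lines instead of raw bytes:
lines = content.decode('utf-8').splitlines()
for line in lines:
    user, _, raw = [s.strip() for s in line.partition(' ')]
    print(user, raw)
```

Reading the file with `f.readlines()` in `load()`, as proposed above, would have the same effect of giving parsers `str` lines rather than `bytes`.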
[ { "content": "import itertools\nimport logging\nimport os\nimport re\nimport six\nimport traceback\nimport codecs\n\nfrom collections import defaultdict\nfrom glob import glob\nfrom subprocess import call\n\nfrom insights.core import blacklist, dr\nfrom insights.core.filters import _add_filter, get_filters\nfrom insights.core.context import ExecutionContext, FSRoots, HostContext\nfrom insights.core.plugins import component, datasource, ContentException, is_datasource\nfrom insights.util import fs, streams, which\nfrom insights.util.subproc import Pipeline\nfrom insights.core.serde import deserializer, serializer\nimport shlex\n\nlog = logging.getLogger(__name__)\n\n\nSAFE_ENV = {\n \"PATH\": os.path.pathsep.join([\n \"/bin\",\n \"/usr/bin\",\n \"/sbin\",\n \"/usr/sbin\",\n \"/usr/share/Modules/bin\",\n ]),\n \"LC_ALL\": \"C\",\n}\n\"\"\"\nA minimal set of environment variables for use in subprocess calls\n\"\"\"\nif \"LANG\" in os.environ:\n SAFE_ENV[\"LANG\"] = os.environ[\"LANG\"]\n\n\ndef enc(s):\n escape_encoding = \"string_escape\" if six.PY2 else \"unicode_escape\"\n return s.encode(escape_encoding)\n\n\ndef escape(s):\n return re.sub(r\"([=\\(\\)|\\-_!@*~\\\"&/\\\\\\^\\$\\=])\", r\"\\\\\\1\", s)\n\n\ndef mangle_command(command, name_max=255):\n \"\"\"\n Mangle a command line string into something suitable for use as the basename of a filename.\n At minimum this function must remove slashes, but it also does other things to clean\n the basename: removing directory names from the command name, replacing many non-\n characters with undersores, in addition to replacing slashes with dots.\n\n By default, curly braces, '{' and '}', are replaced with underscore, set 'has_variables'\n to leave curly braces alone.\n\n This function was copied from the function that insights-client uses to create the name it\n to capture the output of the command.\n\n Here, server side, it is used to figure out what file in the archive contains the output\n a command. Server side, the command may contain references to variables (names\n matching curly braces) that will be expanded before the name is actually used as a file name.\n\n To completly mimic the insights-client behavior, curly braces need to be replaced\n underscores. 
If the command has variable references, the curly braces must be left alone.\n Set has_variables, to leave curly braces alone.\n\n This implementation of 'has_variables' assumes that variable names only contain\n that are not replaced by mangle_command.\n \"\"\"\n pattern = r\"[^\\w\\-\\.\\/]+\"\n\n mangledname = re.sub(r\"^/(usr/|)(bin|sbin)/\", \"\", command)\n mangledname = re.sub(pattern, \"_\", mangledname)\n mangledname = re.sub(r\"/\", \".\", mangledname).strip(\" ._-\")\n mangledname = mangledname[:name_max]\n return mangledname\n\n\nclass ContentProvider(object):\n def __init__(self):\n self.cmd = None\n self.args = None\n self.rc = None\n self.root = None\n self.relative_path = None\n self.loaded = False\n self._content = None\n self._exception = None\n\n def load(self):\n raise NotImplementedError()\n\n def stream(self):\n \"\"\"\n Returns a generator of lines instead of a list of lines.\n \"\"\"\n st = self._stream()\n for l in next(st):\n yield l.rstrip(\"\\n\")\n\n def _stream(self):\n raise NotImplementedError()\n\n @property\n def path(self):\n return os.path.join(self.root, self.relative_path)\n\n @property\n def content(self):\n if self._exception:\n raise self._exception\n\n if self._content is None:\n try:\n self._content = self.load()\n except Exception as ex:\n self._exception = ex\n raise\n\n return self._content\n\n def __repr__(self):\n msg = \"<%s(path=%r, cmd=%r)>\"\n return msg % (self.__class__.__name__, self.path or \"\", self.cmd or \"\")\n\n def __unicode__(self):\n return self.__repr__()\n\n def __str__(self):\n return self.__unicode__()\n\n\nclass DatasourceProvider(ContentProvider):\n def __init__(self, content, relative_path, root='/', ds=None, ctx=None):\n super(DatasourceProvider, self).__init__()\n self.relative_path = relative_path\n self._content = content if isinstance(content, list) else content.splitlines()\n self.root = root\n self.ds = ds\n self.ctx = ctx\n\n def _stream(self):\n \"\"\"\n Returns a generator of lines instead of a list of lines.\n \"\"\"\n yield self._content\n\n def write(self, dst):\n fs.ensure_path(os.path.dirname(dst))\n with open(dst, \"wb\") as f:\n f.write(\"\\n\".join(self.content).encode(\"utf-8\"))\n\n self.loaded = False\n self._content = None\n\n def load(self):\n return self.content\n\n\nclass FileProvider(ContentProvider):\n def __init__(self, relative_path, root=\"/\", ds=None, ctx=None):\n super(FileProvider, self).__init__()\n self.root = root\n self.relative_path = relative_path.lstrip(\"/\")\n self.file_name = os.path.basename(self.path)\n\n self.ds = ds\n self.ctx = ctx\n self.validate()\n\n def validate(self):\n if not blacklist.allow_file(\"/\" + self.relative_path):\n log.warning(\"WARNING: Skipping file %s\", \"/\" + self.relative_path)\n raise dr.SkipComponent()\n\n if not os.path.exists(self.path):\n raise ContentException(\"%s does not exist.\" % self.path)\n\n resolved = os.path.realpath(self.path)\n if not resolved.startswith(os.path.realpath(self.root)):\n msg = \"Relative path points outside the root: %s -> %s.\"\n raise Exception(msg % (self.path, resolved))\n\n if not os.access(self.path, os.R_OK):\n raise ContentException(\"Cannot access %s\" % self.path)\n\n def __repr__(self):\n return '%s(\"%r\")' % (self.__class__.__name__, self.path)\n\n\nclass RawFileProvider(FileProvider):\n \"\"\"\n Class used in datasources that returns the contents of a file a single\n string. 
The file is not filtered.\n \"\"\"\n\n def load(self):\n self.loaded = True\n with open(self.path, 'rb') as f:\n return f.read()\n\n def write(self, dst):\n fs.ensure_path(os.path.dirname(dst))\n call([which(\"cp\", env=SAFE_ENV), self.path, dst], env=SAFE_ENV)\n\n\nclass TextFileProvider(FileProvider):\n \"\"\"\n Class used in datasources that returns the contents of a file a list of\n lines. Each line is filtered if filters are defined for the datasource.\n \"\"\"\n\n def create_args(self):\n args = []\n filters = \"\\n\".join(get_filters(self.ds)) if self.ds else None\n if filters:\n args.append([\"grep\", \"-F\", filters, self.path])\n\n patterns = \"\\n\".join(blacklist.get_disallowed_patterns())\n if patterns:\n grep = [\"grep\", \"-v\", \"-F\", patterns]\n if not args:\n grep.append(self.path)\n args.append(grep)\n\n keywords = blacklist.get_disallowed_keywords()\n if keywords:\n sed = [\"sed\"]\n for kw in keywords:\n sed.extend([\"-e\", \"s/%s/keyword/g\" % kw.replace(\"/\", \"\\\\/\")])\n if not args:\n sed.append(self.path)\n args.append(sed)\n return args\n\n def load(self):\n self.loaded = True\n args = self.create_args()\n if args:\n rc, out = self.ctx.shell_out(args, keep_rc=True, env=SAFE_ENV)\n self.rc = rc\n return out\n if six.PY3:\n with open(self.path, \"r\", encoding=\"utf-8\", errors=\"surrogateescape\") as f:\n return [l.rstrip(\"\\n\") for l in f]\n else:\n with codecs.open(self.path, \"r\", encoding=\"utf-8\", errors=\"surrogateescape\") as f:\n return [l.rstrip(\"\\n\") for l in f]\n\n def _stream(self):\n \"\"\"\n Returns a generator of lines instead of a list of lines.\n \"\"\"\n if self._exception:\n raise self._exception\n try:\n if self._content:\n yield self._content\n else:\n args = self.create_args()\n if args:\n with streams.connect(*args, env=SAFE_ENV) as s:\n yield s\n else:\n if six.PY3:\n with open(self.path, \"r\", encoding=\"utf-8\", errors=\"surrogateescape\") as f:\n yield f\n else:\n with codecs.open(self.path, \"r\", encoding=\"utf-8\", errors=\"surrogateescape\") as f:\n yield f\n except StopIteration:\n raise\n except Exception as ex:\n self._exception = ex\n raise ContentException(str(ex))\n\n def write(self, dst):\n fs.ensure_path(os.path.dirname(dst))\n args = self.create_args()\n if args:\n p = Pipeline(*args, env=SAFE_ENV)\n p.write(dst)\n else:\n call([which(\"cp\", env=SAFE_ENV), self.path, dst], env=SAFE_ENV)\n\n\nclass SerializedOutputProvider(TextFileProvider):\n def create_args(self):\n pass\n\n\nclass SerializedRawOutputProvider(RawFileProvider):\n pass\n\n\nclass CommandOutputProvider(ContentProvider):\n \"\"\"\n Class used in datasources to return output from commands.\n \"\"\"\n def __init__(self, cmd, ctx, args=None, split=True, keep_rc=False, ds=None, timeout=None, inherit_env=None):\n super(CommandOutputProvider, self).__init__()\n self.cmd = cmd\n self.root = \"insights_commands\"\n self.relative_path = os.path.join(\"insights_commands\", mangle_command(cmd))\n self.ctx = ctx\n self.args = args # already interpolated into cmd - stored here for context.\n self.split = split\n self.keep_rc = keep_rc\n self.ds = ds\n self.timeout = timeout\n self.inherit_env = inherit_env or []\n\n self._content = None\n self.rc = None\n\n self.validate()\n\n def validate(self):\n if not blacklist.allow_command(self.cmd):\n log.warning(\"WARNING: Skipping command %s\", self.cmd)\n raise dr.SkipComponent()\n\n if not which(shlex.split(self.cmd)[0], env=self.create_env()):\n raise ContentException(\"Couldn't execute: %s\" % self.cmd)\n\n def 
create_args(self):\n command = [shlex.split(self.cmd)]\n\n if self.split:\n filters = \"\\n\".join(get_filters(self.ds))\n if filters:\n command.append([\"grep\", \"-F\", filters])\n\n patterns = \"\\n\".join(blacklist.get_disallowed_patterns())\n if patterns:\n command.append([\"grep\", \"-v\", \"-F\", patterns])\n\n keywords = blacklist.get_disallowed_keywords()\n if keywords:\n sed = [\"sed\"]\n for kw in keywords:\n sed.extend([\"-e\", \"s/%s/keyword/g\" % kw.replace(\"/\", \"\\\\/\")])\n command.append(sed)\n return command\n\n def create_env(self):\n env = dict(SAFE_ENV)\n for e in self.inherit_env:\n if e in os.environ:\n env[e] = os.environ[e]\n return env\n\n def load(self):\n command = self.create_args()\n\n raw = self.ctx.shell_out(command, split=self.split, keep_rc=self.keep_rc,\n timeout=self.timeout, env=self.create_env())\n if self.keep_rc:\n self.rc, output = raw\n else:\n output = raw\n return output\n\n def _stream(self):\n \"\"\"\n Returns a generator of lines instead of a list of lines.\n \"\"\"\n if self._exception:\n raise self._exception\n try:\n if self._content:\n yield self._content\n else:\n args = self.create_args()\n with self.ctx.connect(*args, env=self.create_env(), timeout=self.timeout) as s:\n yield s\n except StopIteration:\n raise\n except Exception as ex:\n self._exception = ex\n raise ContentException(str(ex))\n\n def write(self, dst):\n args = self.create_args()\n fs.ensure_path(os.path.dirname(dst))\n if args:\n timeout = self.timeout or self.ctx.timeout\n p = Pipeline(*args, timeout=timeout, env=self.create_env())\n return p.write(dst, keep_rc=self.keep_rc)\n\n def __repr__(self):\n return 'CommandOutputProvider(\"%r\")' % self.cmd\n\n\nclass RegistryPoint(object):\n # Marker class for declaring that an element of a `SpecSet` subclass\n # is a registry point against which further subclasses can register\n # datasource implementations by simply declaring them with the same name.\n #\n # intentionally not a docstring so this doesn't show up in pydoc.\n def __init__(self, metadata=None, multi_output=False, raw=False,\n filterable=False):\n self.metadata = metadata\n self.multi_output = multi_output\n self.raw = raw\n self.filterable = filterable\n self.__name__ = self.__class__.__name__\n datasource([], metadata=metadata, multi_output=multi_output, raw=raw,\n filterable=filterable)(self)\n\n def __call__(self, broker):\n for c in reversed(dr.get_delegate(self).deps):\n if c in broker:\n return broker[c]\n raise dr.SkipComponent()\n\n def __repr__(self):\n return dr.get_name(self)\n\n\nclass SpecDescriptor(object):\n # Descriptor Protocol handler that returns the literal function from a\n # class during dot (.) access.\n #\n # intentionally not a docstring so this doesn't show up in pydoc.\n def __init__(self, func):\n self.func = func\n\n def __get__(self, obj, obj_type):\n return self.func\n\n def __set__(self, obj, val):\n raise AttributeError()\n\n\ndef _get_ctx_dependencies(component):\n ctxs = set()\n for c in dr.walk_tree(component):\n try:\n if issubclass(c, ExecutionContext):\n ctxs.add(c)\n except:\n pass\n return ctxs\n\n\ndef _register_context_handler(parents, component):\n name = component.__name__\n parents = list(itertools.takewhile(lambda x: name in x.registry, parents))\n if not parents:\n return\n\n # If the new component handles a context, we need to tell the\n # previously registered components that would have handled it to ignore it.\n\n # The components that handle a context are registered on the highest class\n # of the MRO list. 
This is so overrides work correctly even if a\n # component isn't a direct sibling of the component it's overriding.\n\n # instead of trying to unhook all of the dependencies, we just tell the\n # previous handler of a context to ignore it.\n ctx_handlers = parents[-1].context_handlers\n for c in _get_ctx_dependencies(component):\n for old in ctx_handlers[name][c]:\n dr.add_ignore(old, c)\n ctx_handlers[name][c].append(component)\n\n\ndef _resolve_registry_points(cls, base, dct):\n module = cls.__module__\n parents = [x for x in cls.__mro__ if x not in (cls, SpecSet, object)]\n\n for k, v in dct.items():\n if isinstance(v, RegistryPoint):\n # add v under its name to this class's registry.\n v.__name__ = k\n cls.registry[k] = v\n\n if is_datasource(v):\n v.__qualname__ = \".\".join([cls.__name__, k])\n v.__name__ = k\n v.__module__ = module\n setattr(cls, k, SpecDescriptor(v))\n if k in base.registry:\n # if the datasource has the same name as a RegistryPoint in the\n # base class, the datasource to the RegistryPoint.\n point = base.registry[k]\n\n # TODO: log when RegistryPoint and implementation properties\n # TODO: aren't the same.\n delegate = dr.get_delegate(v)\n v.filterable = delegate.filterable = point.filterable\n v.raw = delegate.raw = point.raw\n v.multi_output = delegate.multi_output = point.multi_output\n\n # the RegistryPoint gets the implementation datasource as a\n # dependency\n dr.add_dependency(point, v)\n\n # Datasources override previously defined datasources of the\n # same name for contexts they all depend on. Here we tell\n # datasources of the same name not to execute under contexts\n # the new datasource will handle.\n _register_context_handler(parents, v)\n\n\nclass SpecSetMeta(type):\n \"\"\"\n The metaclass that converts RegistryPoint markers to registry point\n datasources and hooks implementations for them into the registry.\n \"\"\"\n def __new__(cls, name, bases, dct):\n dct[\"context_handlers\"] = defaultdict(lambda: defaultdict(list))\n dct[\"registry\"] = {}\n return super(SpecSetMeta, cls).__new__(cls, name, bases, dct)\n\n def __init__(cls, name, bases, dct):\n if name == \"SpecSet\":\n return\n if len(bases) > 1:\n raise Exception(\"SpecSet subclasses must inherit from only one class.\")\n _resolve_registry_points(cls, bases[0], dct)\n\n\nclass SpecSet(six.with_metaclass(SpecSetMeta)):\n \"\"\"\n The base class for all spec declarations. 
Extend this class and define your\n datasources directly or with a `SpecFactory`.\n \"\"\"\n pass\n\n\ndef _get_context(context, broker):\n if isinstance(context, list):\n return dr.first_of(context, broker)\n return broker.get(context)\n\n\nclass simple_file(object):\n \"\"\"\n Creates a datasource that reads the file at path when evaluated.\n\n Args:\n path (str): path to the file to read\n context (ExecutionContext): the context under which the datasource\n should run.\n kind (FileProvider): One of TextFileProvider or RawFileProvider.\n\n Returns:\n function: A datasource that reads all files matching the glob patterns.\n \"\"\"\n def __init__(self, path, context=None, deps=[], kind=TextFileProvider, **kwargs):\n self.path = path\n self.context = context or FSRoots\n self.kind = kind\n self.raw = kind is RawFileProvider\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n ctx = _get_context(self.context, broker)\n return self.kind(ctx.locate_path(self.path), root=ctx.root, ds=self, ctx=ctx)\n\n\nclass glob_file(object):\n \"\"\"\n Creates a datasource that reads all files matching the glob pattern(s).\n\n Args:\n patterns (str or [str]): glob pattern(s) of paths to read.\n ignore (regex): a regular expression that is used to filter the paths\n matched by pattern(s).\n context (ExecutionContext): the context under which the datasource\n should run.\n kind (FileProvider): One of TextFileProvider or RawFileProvider.\n max_files (int): Maximum number of glob files to process.\n\n Returns:\n function: A datasource that reads all files matching the glob patterns.\n \"\"\"\n def __init__(self, patterns, ignore=None, context=None, deps=[], kind=TextFileProvider, max_files=1000, **kwargs):\n if not isinstance(patterns, (list, set)):\n patterns = [patterns]\n self.patterns = patterns\n self.ignore = ignore\n self.ignore_func = re.compile(ignore).search if ignore else lambda x: False\n self.context = context or FSRoots\n self.kind = kind\n self.raw = kind is RawFileProvider\n self.max_files = max_files\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps, multi_output=True, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n ctx = _get_context(self.context, broker)\n root = ctx.root\n results = []\n for pattern in self.patterns:\n pattern = ctx.locate_path(pattern)\n for path in sorted(glob(os.path.join(root, pattern.lstrip('/')))):\n if self.ignore_func(path) or os.path.isdir(path):\n continue\n try:\n results.append(self.kind(path[len(root):], root=root, ds=self, ctx=ctx))\n except:\n log.debug(traceback.format_exc())\n if results:\n if len(results) > self.max_files:\n raise ContentException(\"Number of files returned [{0}] is over the {1} file limit, please refine \"\n \"the specs file pattern to narrow down results\".format(len(results), self.max_files))\n return results\n raise ContentException(\"[%s] didn't match.\" % ', '.join(self.patterns))\n\n\nclass head(object):\n \"\"\"\n Return the first element of any datasource that produces a list.\n \"\"\"\n def __init__(self, dep, **kwargs):\n self.dep = dep\n self.__name__ = self.__class__.__name__\n datasource(dep, **kwargs)(self)\n\n def __call__(self, lst):\n c = lst[self.dep]\n if lst:\n return c[0]\n raise dr.SkipComponent()\n\n\nclass first_file(object):\n \"\"\"\n Creates a datasource that returns the first existing and readable file in\n files.\n\n Args:\n files (str): list of paths to find and read\n context 
(ExecutionContext): the context under which the datasource\n should run.\n kind (FileProvider): One of TextFileProvider or RawFileProvider.\n\n Returns:\n function: A datasource that returns the first file in files that exists\n and is readable\n \"\"\"\n\n def __init__(self, paths, context=None, deps=[], kind=TextFileProvider, **kwargs):\n self.paths = paths\n self.context = context or FSRoots\n self.kind = kind\n self.raw = kind is RawFileProvider\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n ctx = _get_context(self.context, broker)\n root = ctx.root\n for p in self.paths:\n try:\n return self.kind(ctx.locate_path(p), root=root, ds=self, ctx=ctx)\n except:\n pass\n raise ContentException(\"None of [%s] found.\" % ', '.join(self.paths))\n\n\nclass listdir(object):\n \"\"\"\n Execute a simple directory listing of all the files and directories in\n path.\n\n Args:\n path (str): directory or glob pattern to list.\n context (ExecutionContext): the context under which the datasource\n should run.\n ignore (str): regular expression defining paths to ignore.\n\n Returns:\n function: A datasource that returns the list of files and directories\n in the directory specified by path\n \"\"\"\n\n def __init__(self, path, context=None, ignore=None, deps=[]):\n self.path = path\n self.context = context or FSRoots\n self.ignore = ignore\n self.ignore_func = re.compile(ignore).search if ignore else lambda x: False\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps)(self)\n\n def __call__(self, broker):\n ctx = _get_context(self.context, broker)\n p = os.path.join(ctx.root, self.path.lstrip('/'))\n p = ctx.locate_path(p)\n result = sorted(os.listdir(p)) if os.path.isdir(p) else sorted(glob(p))\n\n if result:\n return [os.path.basename(r) for r in result if not self.ignore_func(r)]\n raise ContentException(\"Can't list %s or nothing there.\" % p)\n\n\nclass simple_command(object):\n \"\"\"\n Execute a simple command that has no dynamic arguments\n\n Args:\n cmd (str): the command(s) to execute. Breaking apart a command\n string that might contain multiple commands separated by a pipe,\n getting them ready for subproc operations.\n IE. A command with filters applied\n context (ExecutionContext): the context under which the datasource\n should run.\n split (bool): whether the output of the command should be split into a\n list of lines\n keep_rc (bool): whether to return the error code returned by the\n process executing the command. If False, any return code other than\n zero with raise a CalledProcessError. If True, the return code and\n output are always returned.\n timeout (int): Number of seconds to wait for the command to complete.\n If the timeout is reached before the command returns, a\n CalledProcessError is raised. 
If None, timeout is infinite.\n inherit_env (list): The list of environment variables to inherit from the\n calling process when the command is invoked.\n\n Returns:\n function: A datasource that returns the output of a command that takes\n no arguments\n \"\"\"\n\n def __init__(self, cmd, context=HostContext, deps=[], split=True, keep_rc=False, timeout=None, inherit_env=[], **kwargs):\n self.cmd = cmd\n self.context = context\n self.split = split\n self.raw = not split\n self.keep_rc = keep_rc\n self.timeout = timeout\n self.inherit_env = inherit_env\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n ctx = broker[self.context]\n return CommandOutputProvider(self.cmd, ctx, split=self.split,\n keep_rc=self.keep_rc, ds=self, timeout=self.timeout, inherit_env=self.inherit_env)\n\n\nclass command_with_args(object):\n \"\"\"\n Execute a command that has dynamic arguments\n\n Args:\n cmd (str): the command to execute. Breaking apart a command\n string that might require arguments.\n provider (str or tuple): argument string or a tuple of argument strings.\n context (ExecutionContext): the context under which the datasource\n should run.\n split (bool): whether the output of the command should be split into a\n list of lines\n keep_rc (bool): whether to return the error code returned by the\n process executing the command. If False, any return code other than\n zero with raise a CalledProcessError. If True, the return code and\n output are always returned.\n timeout (int): Number of seconds to wait for the command to complete.\n If the timeout is reached before the command returns, a\n CalledProcessError is raised. If None, timeout is infinite.\n inherit_env (list): The list of environment variables to inherit from the\n calling process when the command is invoked.\n\n Returns:\n function: A datasource that returns the output of a command that takes\n specified arguments passed by the provider.\n \"\"\"\n\n def __init__(self, cmd, provider, context=HostContext, deps=None, split=True, keep_rc=False, timeout=None, inherit_env=None, **kwargs):\n deps = deps if deps is not None else []\n self.cmd = cmd\n self.provider = provider\n self.context = context\n self.split = split\n self.raw = not split\n self.keep_rc = keep_rc\n self.timeout = timeout\n self.inherit_env = inherit_env if inherit_env is not None else []\n self.__name__ = self.__class__.__name__\n datasource(self.provider, self.context, *deps, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n source = broker[self.provider]\n ctx = broker[self.context]\n if not isinstance(source, (str, tuple)):\n raise ContentException(\"The provider can only be a single string or a tuple of strings, but got '%s'.\" % source)\n try:\n self.cmd = self.cmd % source\n return CommandOutputProvider(self.cmd, ctx, split=self.split,\n keep_rc=self.keep_rc, ds=self, timeout=self.timeout, inherit_env=self.inherit_env)\n except:\n log.debug(traceback.format_exc())\n raise ContentException(\"No results found for [%s]\" % self.cmd)\n\n\nclass foreach_execute(object):\n \"\"\"\n Execute a command for each element in provider. Provider is the output of\n a different datasource that returns a list of single elements or a list of\n tuples. The command should have %s substitution parameters equal to the\n number of elements in each tuple of the provider.\n\n Args:\n provider (list): a list of elements or tuples.\n cmd (str): a command with substitution parameters. 
Breaking\n apart a command string that might contain multiple commands\n separated by a pipe, getting them ready for subproc operations.\n IE. A command with filters applied\n context (ExecutionContext): the context under which the datasource\n should run.\n split (bool): whether the output of the command should be split into a\n list of lines\n keep_rc (bool): whether to return the error code returned by the\n process executing the command. If False, any return code other than\n zero with raise a CalledProcessError. If True, the return code and\n output are always returned.\n timeout (int): Number of seconds to wait for the command to complete.\n If the timeout is reached before the command returns, a\n CalledProcessError is raised. If None, timeout is infinite.\n inherit_env (list): The list of environment variables to inherit from the\n calling process when the command is invoked.\n\n\n Returns:\n function: A datasource that returns a list of outputs for each command\n created by substituting each element of provider into the cmd template.\n \"\"\"\n\n def __init__(self, provider, cmd, context=HostContext, deps=[], split=True, keep_rc=False, timeout=None, inherit_env=[], **kwargs):\n self.provider = provider\n self.cmd = cmd\n self.context = context\n self.split = split\n self.raw = not split\n self.keep_rc = keep_rc\n self.timeout = timeout\n self.inherit_env = inherit_env\n self.__name__ = self.__class__.__name__\n datasource(self.provider, self.context, *deps, multi_output=True, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n result = []\n source = broker[self.provider]\n ctx = broker[self.context]\n if isinstance(source, ContentProvider):\n source = source.content\n if not isinstance(source, (list, set)):\n source = [source]\n for e in source:\n try:\n the_cmd = self.cmd % e\n cop = CommandOutputProvider(the_cmd, ctx, args=e,\n split=self.split, keep_rc=self.keep_rc, ds=self,\n timeout=self.timeout, inherit_env=self.inherit_env)\n result.append(cop)\n except:\n log.debug(traceback.format_exc())\n if result:\n return result\n raise ContentException(\"No results found for [%s]\" % self.cmd)\n\n\nclass foreach_collect(object):\n \"\"\"\n Subtitutes each element in provider into path and collects the files at the\n resulting paths.\n\n Args:\n provider (list): a list of elements or tuples.\n path (str): a path template with substitution parameters.\n context (ExecutionContext): the context under which the datasource\n should run.\n kind (FileProvider): one of TextFileProvider or RawFileProvider\n\n Returns:\n function: A datasource that returns a list of file contents created by\n substituting each element of provider into the path template.\n \"\"\"\n\n def __init__(self, provider, path, ignore=None, context=HostContext, deps=[], kind=TextFileProvider, **kwargs):\n self.provider = provider\n self.path = path\n self.ignore = ignore\n self.ignore_func = re.compile(ignore).search if ignore else lambda x: False\n self.context = context\n self.kind = kind\n self.raw = kind is RawFileProvider\n self.__name__ = self.__class__.__name__\n datasource(self.provider, self.context, *deps, multi_output=True, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n result = []\n source = broker[self.provider]\n ctx = _get_context(self.context, broker)\n root = ctx.root\n if isinstance(source, ContentProvider):\n source = source.content\n if not isinstance(source, (list, set)):\n source = [source]\n for e in source:\n pattern = ctx.locate_path(self.path % e)\n for p in 
glob(os.path.join(root, pattern.lstrip('/'))):\n if self.ignore_func(p) or os.path.isdir(p):\n continue\n try:\n result.append(self.kind(p[len(root):], root=root, ds=self, ctx=ctx))\n except:\n log.debug(traceback.format_exc())\n if result:\n return result\n raise ContentException(\"No results found for [%s]\" % self.path)\n\n\nclass first_of(object):\n \"\"\" Given a list of dependencies, returns the first of the list\n that exists in the broker. At least one must be present, or this\n component won't fire.\n \"\"\"\n def __init__(self, deps):\n self.deps = deps\n self.raw = deps[0].raw\n self.__name__ = self.__class__.__name__\n datasource(deps)(self)\n\n def __call__(self, broker):\n for c in self.deps:\n if c in broker:\n return broker[c]\n\n\nclass find(object):\n \"\"\"\n Helper class for extracting specific lines from a datasource for direct\n consumption by a rule.\n\n .. code:: python\n\n service_starts = find(Specs.audit_log, \"SERVICE_START\")\n\n @rule(service_starts)\n def report(starts):\n return make_info(\"SERVICE_STARTS\", num_starts=len(starts))\n\n Args:\n spec (datasource): some datasource, ideally filterable.\n pattern (string / list): a string or list of strings to match (no\n patterns supported)\n\n Returns:\n A dict where each key is a command, path, or spec name, and each value\n is a non-empty list of matching lines. Only paths with matching lines\n are included.\n\n Raises:\n dr.SkipComponent if no paths have matching lines.\n \"\"\"\n\n def __init__(self, spec, pattern):\n if getattr(spec, \"raw\", False):\n name = dr.get_name(spec)\n raise ValueError(\"{}: Cannot filter raw files.\".format(name))\n\n self.spec = spec\n self.pattern = pattern if isinstance(pattern, list) else [pattern]\n self.__name__ = self.__class__.__name__\n self.__module__ = self.__class__.__module__\n\n if getattr(spec, \"filterable\", False):\n _add_filter(spec, pattern)\n\n component(spec)(self)\n\n def __call__(self, ds):\n # /usr/bin/grep level filtering is applied behind .content or\n # .stream(), but we still need to ensure we get only what *this* find\n # instance wants. 
This can be inefficient on files where many lines\n # match.\n results = {}\n ds = ds if isinstance(ds, list) else [ds]\n for d in ds:\n if d.relative_path:\n origin = os.path.join(\"/\", d.relative_path.lstrip(\"/\"))\n elif d.cmd:\n origin = d.cmd\n else:\n origin = dr.get_name(self.spec)\n stream = d.content if d.loaded else d.stream()\n lines = []\n for line in stream:\n if any(p in line for p in self.pattern):\n lines.append(line)\n if lines:\n results[origin] = lines\n if not results:\n raise dr.SkipComponent()\n return dict(results)\n\n\n@serializer(CommandOutputProvider)\ndef serialize_command_output(obj, root):\n rel = os.path.join(\"insights_commands\", mangle_command(obj.cmd))\n dst = os.path.join(root, rel)\n rc = obj.write(dst)\n return {\n \"rc\": rc,\n \"cmd\": obj.cmd,\n \"args\": obj.args,\n \"relative_path\": rel\n }\n\n\n@deserializer(CommandOutputProvider)\ndef deserialize_command_output(_type, data, root):\n rel = data[\"relative_path\"]\n\n res = SerializedOutputProvider(rel, root)\n\n res.rc = data[\"rc\"]\n res.cmd = data[\"cmd\"]\n res.args = data[\"args\"]\n return res\n\n\n@serializer(TextFileProvider)\ndef serialize_text_file_provider(obj, root):\n dst = os.path.join(root, obj.relative_path)\n rc = obj.write(dst)\n return {\n \"relative_path\": obj.relative_path,\n \"rc\": rc,\n }\n\n\n@deserializer(TextFileProvider)\ndef deserialize_text_provider(_type, data, root):\n rel = data[\"relative_path\"]\n res = SerializedOutputProvider(rel, root)\n res.rc = data[\"rc\"]\n return res\n\n\n@serializer(RawFileProvider)\ndef serialize_raw_file_provider(obj, root):\n dst = os.path.join(root, obj.relative_path)\n rc = obj.write(dst)\n return {\n \"relative_path\": obj.relative_path,\n \"rc\": rc,\n }\n\n\n@deserializer(RawFileProvider)\ndef deserialize_raw_file_provider(_type, data, root):\n rel = data[\"relative_path\"]\n res = SerializedRawOutputProvider(rel, root)\n res.rc = data[\"rc\"]\n return res\n\n\n@serializer(DatasourceProvider)\ndef serialize_datasource_provider(obj, root):\n dst = os.path.join(root, obj.relative_path.lstrip(\"/\"))\n fs.ensure_path(os.path.dirname(dst))\n obj.write(dst)\n return {\"relative_path\": obj.relative_path}\n\n\n@deserializer(DatasourceProvider)\ndef deserialize_datasource_provider(_type, data, root):\n return SerializedRawOutputProvider(data[\"relative_path\"], root)\n", "path": "insights/core/spec_factory.py" } ]
[ { "content": "import itertools\nimport logging\nimport os\nimport re\nimport six\nimport traceback\nimport codecs\n\nfrom collections import defaultdict\nfrom glob import glob\nfrom subprocess import call\n\nfrom insights.core import blacklist, dr\nfrom insights.core.filters import _add_filter, get_filters\nfrom insights.core.context import ExecutionContext, FSRoots, HostContext\nfrom insights.core.plugins import component, datasource, ContentException, is_datasource\nfrom insights.util import fs, streams, which\nfrom insights.util.subproc import Pipeline\nfrom insights.core.serde import deserializer, serializer\nimport shlex\n\nlog = logging.getLogger(__name__)\n\n\nSAFE_ENV = {\n \"PATH\": os.path.pathsep.join([\n \"/bin\",\n \"/usr/bin\",\n \"/sbin\",\n \"/usr/sbin\",\n \"/usr/share/Modules/bin\",\n ]),\n \"LC_ALL\": \"C\",\n}\n\"\"\"\nA minimal set of environment variables for use in subprocess calls\n\"\"\"\nif \"LANG\" in os.environ:\n SAFE_ENV[\"LANG\"] = os.environ[\"LANG\"]\n\n\ndef enc(s):\n escape_encoding = \"string_escape\" if six.PY2 else \"unicode_escape\"\n return s.encode(escape_encoding)\n\n\ndef escape(s):\n return re.sub(r\"([=\\(\\)|\\-_!@*~\\\"&/\\\\\\^\\$\\=])\", r\"\\\\\\1\", s)\n\n\ndef mangle_command(command, name_max=255):\n \"\"\"\n Mangle a command line string into something suitable for use as the basename of a filename.\n At minimum this function must remove slashes, but it also does other things to clean\n the basename: removing directory names from the command name, replacing many non-\n characters with undersores, in addition to replacing slashes with dots.\n\n By default, curly braces, '{' and '}', are replaced with underscore, set 'has_variables'\n to leave curly braces alone.\n\n This function was copied from the function that insights-client uses to create the name it\n to capture the output of the command.\n\n Here, server side, it is used to figure out what file in the archive contains the output\n a command. Server side, the command may contain references to variables (names\n matching curly braces) that will be expanded before the name is actually used as a file name.\n\n To completly mimic the insights-client behavior, curly braces need to be replaced\n underscores. 
If the command has variable references, the curly braces must be left alone.\n Set has_variables, to leave curly braces alone.\n\n This implementation of 'has_variables' assumes that variable names only contain\n that are not replaced by mangle_command.\n \"\"\"\n pattern = r\"[^\\w\\-\\.\\/]+\"\n\n mangledname = re.sub(r\"^/(usr/|)(bin|sbin)/\", \"\", command)\n mangledname = re.sub(pattern, \"_\", mangledname)\n mangledname = re.sub(r\"/\", \".\", mangledname).strip(\" ._-\")\n mangledname = mangledname[:name_max]\n return mangledname\n\n\nclass ContentProvider(object):\n def __init__(self):\n self.cmd = None\n self.args = None\n self.rc = None\n self.root = None\n self.relative_path = None\n self.loaded = False\n self._content = None\n self._exception = None\n\n def load(self):\n raise NotImplementedError()\n\n def stream(self):\n \"\"\"\n Returns a generator of lines instead of a list of lines.\n \"\"\"\n st = self._stream()\n for l in next(st):\n yield l.rstrip(\"\\n\")\n\n def _stream(self):\n raise NotImplementedError()\n\n @property\n def path(self):\n return os.path.join(self.root, self.relative_path)\n\n @property\n def content(self):\n if self._exception:\n raise self._exception\n\n if self._content is None:\n try:\n self._content = self.load()\n except Exception as ex:\n self._exception = ex\n raise\n\n return self._content\n\n def __repr__(self):\n msg = \"<%s(path=%r, cmd=%r)>\"\n return msg % (self.__class__.__name__, self.path or \"\", self.cmd or \"\")\n\n def __unicode__(self):\n return self.__repr__()\n\n def __str__(self):\n return self.__unicode__()\n\n\nclass DatasourceProvider(ContentProvider):\n def __init__(self, content, relative_path, root='/', ds=None, ctx=None):\n super(DatasourceProvider, self).__init__()\n self.relative_path = relative_path\n self._content = content if isinstance(content, list) else content.splitlines()\n self.root = root\n self.ds = ds\n self.ctx = ctx\n\n def _stream(self):\n \"\"\"\n Returns a generator of lines instead of a list of lines.\n \"\"\"\n yield self._content\n\n def write(self, dst):\n fs.ensure_path(os.path.dirname(dst))\n with open(dst, \"wb\") as f:\n f.write(\"\\n\".join(self.content).encode(\"utf-8\"))\n\n self.loaded = False\n self._content = None\n\n def load(self):\n return self.content\n\n\nclass FileProvider(ContentProvider):\n def __init__(self, relative_path, root=\"/\", ds=None, ctx=None):\n super(FileProvider, self).__init__()\n self.root = root\n self.relative_path = relative_path.lstrip(\"/\")\n self.file_name = os.path.basename(self.path)\n\n self.ds = ds\n self.ctx = ctx\n self.validate()\n\n def validate(self):\n if not blacklist.allow_file(\"/\" + self.relative_path):\n log.warning(\"WARNING: Skipping file %s\", \"/\" + self.relative_path)\n raise dr.SkipComponent()\n\n if not os.path.exists(self.path):\n raise ContentException(\"%s does not exist.\" % self.path)\n\n resolved = os.path.realpath(self.path)\n if not resolved.startswith(os.path.realpath(self.root)):\n msg = \"Relative path points outside the root: %s -> %s.\"\n raise Exception(msg % (self.path, resolved))\n\n if not os.access(self.path, os.R_OK):\n raise ContentException(\"Cannot access %s\" % self.path)\n\n def __repr__(self):\n return '%s(\"%r\")' % (self.__class__.__name__, self.path)\n\n\nclass RawFileProvider(FileProvider):\n \"\"\"\n Class used in datasources that returns the contents of a file a single\n string. 
The file is not filtered.\n \"\"\"\n\n def load(self):\n self.loaded = True\n with open(self.path, 'rb') as f:\n return f.read()\n\n def write(self, dst):\n fs.ensure_path(os.path.dirname(dst))\n call([which(\"cp\", env=SAFE_ENV), self.path, dst], env=SAFE_ENV)\n\n\nclass TextFileProvider(FileProvider):\n \"\"\"\n Class used in datasources that returns the contents of a file a list of\n lines. Each line is filtered if filters are defined for the datasource.\n \"\"\"\n\n def create_args(self):\n args = []\n filters = \"\\n\".join(get_filters(self.ds)) if self.ds else None\n if filters:\n args.append([\"grep\", \"-F\", filters, self.path])\n\n patterns = \"\\n\".join(blacklist.get_disallowed_patterns())\n if patterns:\n grep = [\"grep\", \"-v\", \"-F\", patterns]\n if not args:\n grep.append(self.path)\n args.append(grep)\n\n keywords = blacklist.get_disallowed_keywords()\n if keywords:\n sed = [\"sed\"]\n for kw in keywords:\n sed.extend([\"-e\", \"s/%s/keyword/g\" % kw.replace(\"/\", \"\\\\/\")])\n if not args:\n sed.append(self.path)\n args.append(sed)\n return args\n\n def load(self):\n self.loaded = True\n args = self.create_args()\n if args:\n rc, out = self.ctx.shell_out(args, keep_rc=True, env=SAFE_ENV)\n self.rc = rc\n return out\n if six.PY3:\n with open(self.path, \"r\", encoding=\"utf-8\", errors=\"surrogateescape\") as f:\n return [l.rstrip(\"\\n\") for l in f]\n else:\n with codecs.open(self.path, \"r\", encoding=\"utf-8\", errors=\"surrogateescape\") as f:\n return [l.rstrip(\"\\n\") for l in f]\n\n def _stream(self):\n \"\"\"\n Returns a generator of lines instead of a list of lines.\n \"\"\"\n if self._exception:\n raise self._exception\n try:\n if self._content:\n yield self._content\n else:\n args = self.create_args()\n if args:\n with streams.connect(*args, env=SAFE_ENV) as s:\n yield s\n else:\n if six.PY3:\n with open(self.path, \"r\", encoding=\"utf-8\", errors=\"surrogateescape\") as f:\n yield f\n else:\n with codecs.open(self.path, \"r\", encoding=\"utf-8\", errors=\"surrogateescape\") as f:\n yield f\n except StopIteration:\n raise\n except Exception as ex:\n self._exception = ex\n raise ContentException(str(ex))\n\n def write(self, dst):\n fs.ensure_path(os.path.dirname(dst))\n args = self.create_args()\n if args:\n p = Pipeline(*args, env=SAFE_ENV)\n p.write(dst)\n else:\n call([which(\"cp\", env=SAFE_ENV), self.path, dst], env=SAFE_ENV)\n\n\nclass SerializedOutputProvider(TextFileProvider):\n def create_args(self):\n pass\n\n\nclass SerializedRawOutputProvider(RawFileProvider):\n pass\n\n\nclass CommandOutputProvider(ContentProvider):\n \"\"\"\n Class used in datasources to return output from commands.\n \"\"\"\n def __init__(self, cmd, ctx, args=None, split=True, keep_rc=False, ds=None, timeout=None, inherit_env=None):\n super(CommandOutputProvider, self).__init__()\n self.cmd = cmd\n self.root = \"insights_commands\"\n self.relative_path = os.path.join(\"insights_commands\", mangle_command(cmd))\n self.ctx = ctx\n self.args = args # already interpolated into cmd - stored here for context.\n self.split = split\n self.keep_rc = keep_rc\n self.ds = ds\n self.timeout = timeout\n self.inherit_env = inherit_env or []\n\n self._content = None\n self.rc = None\n\n self.validate()\n\n def validate(self):\n if not blacklist.allow_command(self.cmd):\n log.warning(\"WARNING: Skipping command %s\", self.cmd)\n raise dr.SkipComponent()\n\n if not which(shlex.split(self.cmd)[0], env=self.create_env()):\n raise ContentException(\"Couldn't execute: %s\" % self.cmd)\n\n def 
create_args(self):\n command = [shlex.split(self.cmd)]\n\n if self.split:\n filters = \"\\n\".join(get_filters(self.ds))\n if filters:\n command.append([\"grep\", \"-F\", filters])\n\n patterns = \"\\n\".join(blacklist.get_disallowed_patterns())\n if patterns:\n command.append([\"grep\", \"-v\", \"-F\", patterns])\n\n keywords = blacklist.get_disallowed_keywords()\n if keywords:\n sed = [\"sed\"]\n for kw in keywords:\n sed.extend([\"-e\", \"s/%s/keyword/g\" % kw.replace(\"/\", \"\\\\/\")])\n command.append(sed)\n return command\n\n def create_env(self):\n env = dict(SAFE_ENV)\n for e in self.inherit_env:\n if e in os.environ:\n env[e] = os.environ[e]\n return env\n\n def load(self):\n command = self.create_args()\n\n raw = self.ctx.shell_out(command, split=self.split, keep_rc=self.keep_rc,\n timeout=self.timeout, env=self.create_env())\n if self.keep_rc:\n self.rc, output = raw\n else:\n output = raw\n return output\n\n def _stream(self):\n \"\"\"\n Returns a generator of lines instead of a list of lines.\n \"\"\"\n if self._exception:\n raise self._exception\n try:\n if self._content:\n yield self._content\n else:\n args = self.create_args()\n with self.ctx.connect(*args, env=self.create_env(), timeout=self.timeout) as s:\n yield s\n except StopIteration:\n raise\n except Exception as ex:\n self._exception = ex\n raise ContentException(str(ex))\n\n def write(self, dst):\n args = self.create_args()\n fs.ensure_path(os.path.dirname(dst))\n if args:\n timeout = self.timeout or self.ctx.timeout\n p = Pipeline(*args, timeout=timeout, env=self.create_env())\n return p.write(dst, keep_rc=self.keep_rc)\n\n def __repr__(self):\n return 'CommandOutputProvider(\"%r\")' % self.cmd\n\n\nclass RegistryPoint(object):\n # Marker class for declaring that an element of a `SpecSet` subclass\n # is a registry point against which further subclasses can register\n # datasource implementations by simply declaring them with the same name.\n #\n # intentionally not a docstring so this doesn't show up in pydoc.\n def __init__(self, metadata=None, multi_output=False, raw=False,\n filterable=False):\n self.metadata = metadata\n self.multi_output = multi_output\n self.raw = raw\n self.filterable = filterable\n self.__name__ = self.__class__.__name__\n datasource([], metadata=metadata, multi_output=multi_output, raw=raw,\n filterable=filterable)(self)\n\n def __call__(self, broker):\n for c in reversed(dr.get_delegate(self).deps):\n if c in broker:\n return broker[c]\n raise dr.SkipComponent()\n\n def __repr__(self):\n return dr.get_name(self)\n\n\nclass SpecDescriptor(object):\n # Descriptor Protocol handler that returns the literal function from a\n # class during dot (.) access.\n #\n # intentionally not a docstring so this doesn't show up in pydoc.\n def __init__(self, func):\n self.func = func\n\n def __get__(self, obj, obj_type):\n return self.func\n\n def __set__(self, obj, val):\n raise AttributeError()\n\n\ndef _get_ctx_dependencies(component):\n ctxs = set()\n for c in dr.walk_tree(component):\n try:\n if issubclass(c, ExecutionContext):\n ctxs.add(c)\n except:\n pass\n return ctxs\n\n\ndef _register_context_handler(parents, component):\n name = component.__name__\n parents = list(itertools.takewhile(lambda x: name in x.registry, parents))\n if not parents:\n return\n\n # If the new component handles a context, we need to tell the\n # previously registered components that would have handled it to ignore it.\n\n # The components that handle a context are registered on the highest class\n # of the MRO list. 
This is so overrides work correctly even if a\n # component isn't a direct sibling of the component it's overriding.\n\n # instead of trying to unhook all of the dependencies, we just tell the\n # previous handler of a context to ignore it.\n ctx_handlers = parents[-1].context_handlers\n for c in _get_ctx_dependencies(component):\n for old in ctx_handlers[name][c]:\n dr.add_ignore(old, c)\n ctx_handlers[name][c].append(component)\n\n\ndef _resolve_registry_points(cls, base, dct):\n module = cls.__module__\n parents = [x for x in cls.__mro__ if x not in (cls, SpecSet, object)]\n\n for k, v in dct.items():\n if isinstance(v, RegistryPoint):\n # add v under its name to this class's registry.\n v.__name__ = k\n cls.registry[k] = v\n\n if is_datasource(v):\n v.__qualname__ = \".\".join([cls.__name__, k])\n v.__name__ = k\n v.__module__ = module\n setattr(cls, k, SpecDescriptor(v))\n if k in base.registry:\n # if the datasource has the same name as a RegistryPoint in the\n # base class, the datasource to the RegistryPoint.\n point = base.registry[k]\n\n # TODO: log when RegistryPoint and implementation properties\n # TODO: aren't the same.\n delegate = dr.get_delegate(v)\n v.filterable = delegate.filterable = point.filterable\n v.raw = delegate.raw = point.raw\n v.multi_output = delegate.multi_output = point.multi_output\n\n # the RegistryPoint gets the implementation datasource as a\n # dependency\n dr.add_dependency(point, v)\n\n # Datasources override previously defined datasources of the\n # same name for contexts they all depend on. Here we tell\n # datasources of the same name not to execute under contexts\n # the new datasource will handle.\n _register_context_handler(parents, v)\n\n\nclass SpecSetMeta(type):\n \"\"\"\n The metaclass that converts RegistryPoint markers to registry point\n datasources and hooks implementations for them into the registry.\n \"\"\"\n def __new__(cls, name, bases, dct):\n dct[\"context_handlers\"] = defaultdict(lambda: defaultdict(list))\n dct[\"registry\"] = {}\n return super(SpecSetMeta, cls).__new__(cls, name, bases, dct)\n\n def __init__(cls, name, bases, dct):\n if name == \"SpecSet\":\n return\n if len(bases) > 1:\n raise Exception(\"SpecSet subclasses must inherit from only one class.\")\n _resolve_registry_points(cls, bases[0], dct)\n\n\nclass SpecSet(six.with_metaclass(SpecSetMeta)):\n \"\"\"\n The base class for all spec declarations. 
Extend this class and define your\n datasources directly or with a `SpecFactory`.\n \"\"\"\n pass\n\n\ndef _get_context(context, broker):\n if isinstance(context, list):\n return dr.first_of(context, broker)\n return broker.get(context)\n\n\nclass simple_file(object):\n \"\"\"\n Creates a datasource that reads the file at path when evaluated.\n\n Args:\n path (str): path to the file to read\n context (ExecutionContext): the context under which the datasource\n should run.\n kind (FileProvider): One of TextFileProvider or RawFileProvider.\n\n Returns:\n function: A datasource that reads all files matching the glob patterns.\n \"\"\"\n def __init__(self, path, context=None, deps=[], kind=TextFileProvider, **kwargs):\n self.path = path\n self.context = context or FSRoots\n self.kind = kind\n self.raw = kind is RawFileProvider\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n ctx = _get_context(self.context, broker)\n return self.kind(ctx.locate_path(self.path), root=ctx.root, ds=self, ctx=ctx)\n\n\nclass glob_file(object):\n \"\"\"\n Creates a datasource that reads all files matching the glob pattern(s).\n\n Args:\n patterns (str or [str]): glob pattern(s) of paths to read.\n ignore (regex): a regular expression that is used to filter the paths\n matched by pattern(s).\n context (ExecutionContext): the context under which the datasource\n should run.\n kind (FileProvider): One of TextFileProvider or RawFileProvider.\n max_files (int): Maximum number of glob files to process.\n\n Returns:\n function: A datasource that reads all files matching the glob patterns.\n \"\"\"\n def __init__(self, patterns, ignore=None, context=None, deps=[], kind=TextFileProvider, max_files=1000, **kwargs):\n if not isinstance(patterns, (list, set)):\n patterns = [patterns]\n self.patterns = patterns\n self.ignore = ignore\n self.ignore_func = re.compile(ignore).search if ignore else lambda x: False\n self.context = context or FSRoots\n self.kind = kind\n self.raw = kind is RawFileProvider\n self.max_files = max_files\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps, multi_output=True, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n ctx = _get_context(self.context, broker)\n root = ctx.root\n results = []\n for pattern in self.patterns:\n pattern = ctx.locate_path(pattern)\n for path in sorted(glob(os.path.join(root, pattern.lstrip('/')))):\n if self.ignore_func(path) or os.path.isdir(path):\n continue\n try:\n results.append(self.kind(path[len(root):], root=root, ds=self, ctx=ctx))\n except:\n log.debug(traceback.format_exc())\n if results:\n if len(results) > self.max_files:\n raise ContentException(\"Number of files returned [{0}] is over the {1} file limit, please refine \"\n \"the specs file pattern to narrow down results\".format(len(results), self.max_files))\n return results\n raise ContentException(\"[%s] didn't match.\" % ', '.join(self.patterns))\n\n\nclass head(object):\n \"\"\"\n Return the first element of any datasource that produces a list.\n \"\"\"\n def __init__(self, dep, **kwargs):\n self.dep = dep\n self.__name__ = self.__class__.__name__\n datasource(dep, **kwargs)(self)\n\n def __call__(self, lst):\n c = lst[self.dep]\n if lst:\n return c[0]\n raise dr.SkipComponent()\n\n\nclass first_file(object):\n \"\"\"\n Creates a datasource that returns the first existing and readable file in\n files.\n\n Args:\n files (str): list of paths to find and read\n context 
(ExecutionContext): the context under which the datasource\n should run.\n kind (FileProvider): One of TextFileProvider or RawFileProvider.\n\n Returns:\n function: A datasource that returns the first file in files that exists\n and is readable\n \"\"\"\n\n def __init__(self, paths, context=None, deps=[], kind=TextFileProvider, **kwargs):\n self.paths = paths\n self.context = context or FSRoots\n self.kind = kind\n self.raw = kind is RawFileProvider\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n ctx = _get_context(self.context, broker)\n root = ctx.root\n for p in self.paths:\n try:\n return self.kind(ctx.locate_path(p), root=root, ds=self, ctx=ctx)\n except:\n pass\n raise ContentException(\"None of [%s] found.\" % ', '.join(self.paths))\n\n\nclass listdir(object):\n \"\"\"\n Execute a simple directory listing of all the files and directories in\n path.\n\n Args:\n path (str): directory or glob pattern to list.\n context (ExecutionContext): the context under which the datasource\n should run.\n ignore (str): regular expression defining paths to ignore.\n\n Returns:\n function: A datasource that returns the list of files and directories\n in the directory specified by path\n \"\"\"\n\n def __init__(self, path, context=None, ignore=None, deps=[]):\n self.path = path\n self.context = context or FSRoots\n self.ignore = ignore\n self.ignore_func = re.compile(ignore).search if ignore else lambda x: False\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps)(self)\n\n def __call__(self, broker):\n ctx = _get_context(self.context, broker)\n p = os.path.join(ctx.root, self.path.lstrip('/'))\n p = ctx.locate_path(p)\n result = sorted(os.listdir(p)) if os.path.isdir(p) else sorted(glob(p))\n\n if result:\n return [os.path.basename(r) for r in result if not self.ignore_func(r)]\n raise ContentException(\"Can't list %s or nothing there.\" % p)\n\n\nclass simple_command(object):\n \"\"\"\n Execute a simple command that has no dynamic arguments\n\n Args:\n cmd (str): the command(s) to execute. Breaking apart a command\n string that might contain multiple commands separated by a pipe,\n getting them ready for subproc operations.\n IE. A command with filters applied\n context (ExecutionContext): the context under which the datasource\n should run.\n split (bool): whether the output of the command should be split into a\n list of lines\n keep_rc (bool): whether to return the error code returned by the\n process executing the command. If False, any return code other than\n zero with raise a CalledProcessError. If True, the return code and\n output are always returned.\n timeout (int): Number of seconds to wait for the command to complete.\n If the timeout is reached before the command returns, a\n CalledProcessError is raised. 
If None, timeout is infinite.\n inherit_env (list): The list of environment variables to inherit from the\n calling process when the command is invoked.\n\n Returns:\n function: A datasource that returns the output of a command that takes\n no arguments\n \"\"\"\n\n def __init__(self, cmd, context=HostContext, deps=[], split=True, keep_rc=False, timeout=None, inherit_env=[], **kwargs):\n self.cmd = cmd\n self.context = context\n self.split = split\n self.raw = not split\n self.keep_rc = keep_rc\n self.timeout = timeout\n self.inherit_env = inherit_env\n self.__name__ = self.__class__.__name__\n datasource(self.context, *deps, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n ctx = broker[self.context]\n return CommandOutputProvider(self.cmd, ctx, split=self.split,\n keep_rc=self.keep_rc, ds=self, timeout=self.timeout, inherit_env=self.inherit_env)\n\n\nclass command_with_args(object):\n \"\"\"\n Execute a command that has dynamic arguments\n\n Args:\n cmd (str): the command to execute. Breaking apart a command\n string that might require arguments.\n provider (str or tuple): argument string or a tuple of argument strings.\n context (ExecutionContext): the context under which the datasource\n should run.\n split (bool): whether the output of the command should be split into a\n list of lines\n keep_rc (bool): whether to return the error code returned by the\n process executing the command. If False, any return code other than\n zero with raise a CalledProcessError. If True, the return code and\n output are always returned.\n timeout (int): Number of seconds to wait for the command to complete.\n If the timeout is reached before the command returns, a\n CalledProcessError is raised. If None, timeout is infinite.\n inherit_env (list): The list of environment variables to inherit from the\n calling process when the command is invoked.\n\n Returns:\n function: A datasource that returns the output of a command that takes\n specified arguments passed by the provider.\n \"\"\"\n\n def __init__(self, cmd, provider, context=HostContext, deps=None, split=True, keep_rc=False, timeout=None, inherit_env=None, **kwargs):\n deps = deps if deps is not None else []\n self.cmd = cmd\n self.provider = provider\n self.context = context\n self.split = split\n self.raw = not split\n self.keep_rc = keep_rc\n self.timeout = timeout\n self.inherit_env = inherit_env if inherit_env is not None else []\n self.__name__ = self.__class__.__name__\n datasource(self.provider, self.context, *deps, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n source = broker[self.provider]\n ctx = broker[self.context]\n if not isinstance(source, (str, tuple)):\n raise ContentException(\"The provider can only be a single string or a tuple of strings, but got '%s'.\" % source)\n try:\n self.cmd = self.cmd % source\n return CommandOutputProvider(self.cmd, ctx, split=self.split,\n keep_rc=self.keep_rc, ds=self, timeout=self.timeout, inherit_env=self.inherit_env)\n except:\n log.debug(traceback.format_exc())\n raise ContentException(\"No results found for [%s]\" % self.cmd)\n\n\nclass foreach_execute(object):\n \"\"\"\n Execute a command for each element in provider. Provider is the output of\n a different datasource that returns a list of single elements or a list of\n tuples. The command should have %s substitution parameters equal to the\n number of elements in each tuple of the provider.\n\n Args:\n provider (list): a list of elements or tuples.\n cmd (str): a command with substitution parameters. 
Breaking\n apart a command string that might contain multiple commands\n separated by a pipe, getting them ready for subproc operations.\n IE. A command with filters applied\n context (ExecutionContext): the context under which the datasource\n should run.\n split (bool): whether the output of the command should be split into a\n list of lines\n keep_rc (bool): whether to return the error code returned by the\n process executing the command. If False, any return code other than\n zero with raise a CalledProcessError. If True, the return code and\n output are always returned.\n timeout (int): Number of seconds to wait for the command to complete.\n If the timeout is reached before the command returns, a\n CalledProcessError is raised. If None, timeout is infinite.\n inherit_env (list): The list of environment variables to inherit from the\n calling process when the command is invoked.\n\n\n Returns:\n function: A datasource that returns a list of outputs for each command\n created by substituting each element of provider into the cmd template.\n \"\"\"\n\n def __init__(self, provider, cmd, context=HostContext, deps=[], split=True, keep_rc=False, timeout=None, inherit_env=[], **kwargs):\n self.provider = provider\n self.cmd = cmd\n self.context = context\n self.split = split\n self.raw = not split\n self.keep_rc = keep_rc\n self.timeout = timeout\n self.inherit_env = inherit_env\n self.__name__ = self.__class__.__name__\n datasource(self.provider, self.context, *deps, multi_output=True, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n result = []\n source = broker[self.provider]\n ctx = broker[self.context]\n if isinstance(source, ContentProvider):\n source = source.content\n if not isinstance(source, (list, set)):\n source = [source]\n for e in source:\n try:\n the_cmd = self.cmd % e\n cop = CommandOutputProvider(the_cmd, ctx, args=e,\n split=self.split, keep_rc=self.keep_rc, ds=self,\n timeout=self.timeout, inherit_env=self.inherit_env)\n result.append(cop)\n except:\n log.debug(traceback.format_exc())\n if result:\n return result\n raise ContentException(\"No results found for [%s]\" % self.cmd)\n\n\nclass foreach_collect(object):\n \"\"\"\n Subtitutes each element in provider into path and collects the files at the\n resulting paths.\n\n Args:\n provider (list): a list of elements or tuples.\n path (str): a path template with substitution parameters.\n context (ExecutionContext): the context under which the datasource\n should run.\n kind (FileProvider): one of TextFileProvider or RawFileProvider\n\n Returns:\n function: A datasource that returns a list of file contents created by\n substituting each element of provider into the path template.\n \"\"\"\n\n def __init__(self, provider, path, ignore=None, context=HostContext, deps=[], kind=TextFileProvider, **kwargs):\n self.provider = provider\n self.path = path\n self.ignore = ignore\n self.ignore_func = re.compile(ignore).search if ignore else lambda x: False\n self.context = context\n self.kind = kind\n self.raw = kind is RawFileProvider\n self.__name__ = self.__class__.__name__\n datasource(self.provider, self.context, *deps, multi_output=True, raw=self.raw, **kwargs)(self)\n\n def __call__(self, broker):\n result = []\n source = broker[self.provider]\n ctx = _get_context(self.context, broker)\n root = ctx.root\n if isinstance(source, ContentProvider):\n source = source.content\n if not isinstance(source, (list, set)):\n source = [source]\n for e in source:\n pattern = ctx.locate_path(self.path % e)\n for p in 
glob(os.path.join(root, pattern.lstrip('/'))):\n if self.ignore_func(p) or os.path.isdir(p):\n continue\n try:\n result.append(self.kind(p[len(root):], root=root, ds=self, ctx=ctx))\n except:\n log.debug(traceback.format_exc())\n if result:\n return result\n raise ContentException(\"No results found for [%s]\" % self.path)\n\n\nclass first_of(object):\n \"\"\" Given a list of dependencies, returns the first of the list\n that exists in the broker. At least one must be present, or this\n component won't fire.\n \"\"\"\n def __init__(self, deps):\n self.deps = deps\n self.raw = deps[0].raw\n self.__name__ = self.__class__.__name__\n datasource(deps)(self)\n\n def __call__(self, broker):\n for c in self.deps:\n if c in broker:\n return broker[c]\n\n\nclass find(object):\n \"\"\"\n Helper class for extracting specific lines from a datasource for direct\n consumption by a rule.\n\n .. code:: python\n\n service_starts = find(Specs.audit_log, \"SERVICE_START\")\n\n @rule(service_starts)\n def report(starts):\n return make_info(\"SERVICE_STARTS\", num_starts=len(starts))\n\n Args:\n spec (datasource): some datasource, ideally filterable.\n pattern (string / list): a string or list of strings to match (no\n patterns supported)\n\n Returns:\n A dict where each key is a command, path, or spec name, and each value\n is a non-empty list of matching lines. Only paths with matching lines\n are included.\n\n Raises:\n dr.SkipComponent if no paths have matching lines.\n \"\"\"\n\n def __init__(self, spec, pattern):\n if getattr(spec, \"raw\", False):\n name = dr.get_name(spec)\n raise ValueError(\"{}: Cannot filter raw files.\".format(name))\n\n self.spec = spec\n self.pattern = pattern if isinstance(pattern, list) else [pattern]\n self.__name__ = self.__class__.__name__\n self.__module__ = self.__class__.__module__\n\n if getattr(spec, \"filterable\", False):\n _add_filter(spec, pattern)\n\n component(spec)(self)\n\n def __call__(self, ds):\n # /usr/bin/grep level filtering is applied behind .content or\n # .stream(), but we still need to ensure we get only what *this* find\n # instance wants. 
This can be inefficient on files where many lines\n # match.\n results = {}\n ds = ds if isinstance(ds, list) else [ds]\n for d in ds:\n if d.relative_path:\n origin = os.path.join(\"/\", d.relative_path.lstrip(\"/\"))\n elif d.cmd:\n origin = d.cmd\n else:\n origin = dr.get_name(self.spec)\n stream = d.content if d.loaded else d.stream()\n lines = []\n for line in stream:\n if any(p in line for p in self.pattern):\n lines.append(line)\n if lines:\n results[origin] = lines\n if not results:\n raise dr.SkipComponent()\n return dict(results)\n\n\n@serializer(CommandOutputProvider)\ndef serialize_command_output(obj, root):\n rel = os.path.join(\"insights_commands\", mangle_command(obj.cmd))\n dst = os.path.join(root, rel)\n rc = obj.write(dst)\n return {\n \"rc\": rc,\n \"cmd\": obj.cmd,\n \"args\": obj.args,\n \"relative_path\": rel\n }\n\n\n@deserializer(CommandOutputProvider)\ndef deserialize_command_output(_type, data, root):\n rel = data[\"relative_path\"]\n\n res = SerializedOutputProvider(rel, root)\n\n res.rc = data[\"rc\"]\n res.cmd = data[\"cmd\"]\n res.args = data[\"args\"]\n return res\n\n\n@serializer(TextFileProvider)\ndef serialize_text_file_provider(obj, root):\n dst = os.path.join(root, obj.relative_path)\n rc = obj.write(dst)\n return {\n \"relative_path\": obj.relative_path,\n \"rc\": rc,\n }\n\n\n@deserializer(TextFileProvider)\ndef deserialize_text_provider(_type, data, root):\n rel = data[\"relative_path\"]\n res = SerializedOutputProvider(rel, root)\n res.rc = data[\"rc\"]\n return res\n\n\n@serializer(RawFileProvider)\ndef serialize_raw_file_provider(obj, root):\n dst = os.path.join(root, obj.relative_path)\n rc = obj.write(dst)\n return {\n \"relative_path\": obj.relative_path,\n \"rc\": rc,\n }\n\n\n@deserializer(RawFileProvider)\ndef deserialize_raw_file_provider(_type, data, root):\n rel = data[\"relative_path\"]\n res = SerializedRawOutputProvider(rel, root)\n res.rc = data[\"rc\"]\n return res\n\n\n@serializer(DatasourceProvider)\ndef serialize_datasource_provider(obj, root):\n dst = os.path.join(root, obj.relative_path.lstrip(\"/\"))\n fs.ensure_path(os.path.dirname(dst))\n obj.write(dst)\n return {\"relative_path\": obj.relative_path}\n\n\n@deserializer(DatasourceProvider)\ndef deserialize_datasource_provider(_type, data, root):\n return SerializedOutputProvider(data[\"relative_path\"], root)\n", "path": "insights/core/spec_factory.py" } ]
diff --git a/insights/core/spec_factory.py b/insights/core/spec_factory.py index a600836a76..87ac00ba2f 100644 --- a/insights/core/spec_factory.py +++ b/insights/core/spec_factory.py @@ -1076,4 +1076,4 @@ def serialize_datasource_provider(obj, root): @deserializer(DatasourceProvider) def deserialize_datasource_provider(_type, data, root): - return SerializedRawOutputProvider(data["relative_path"], root) + return SerializedOutputProvider(data["relative_path"], root)
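The one-line change in this diff swaps the deserializer's return type: a `DatasourceProvider` is written to the archive as newline-joined UTF-8 text, so reading it back through the raw (bytes) provider hands downstream parsers a bytes blob instead of a list of lines. Below is a minimal, hedged sketch of that distinction; the two reader classes are simplified stand-ins for the real insights-core providers, and the temporary file is invented for the example.

```python
# Simplified stand-ins for the raw vs. text provider kinds (illustration only,
# not the actual insights-core classes).
import os
import tempfile


class RawReader(object):
    """Mimics a raw provider: load() returns the file content as bytes."""
    def __init__(self, path):
        self.path = path

    def load(self):
        with open(self.path, "rb") as f:
            return f.read()


class TextReader(object):
    """Mimics a text provider: load() returns a list of decoded lines."""
    def __init__(self, path):
        self.path = path

    def load(self):
        with open(self.path, "r") as f:
            return [l.rstrip("\n") for l in f]


# A DatasourceProvider serializes its content as newline-joined text, so the
# deserializer should hand back lines (TextReader), not bytes (RawReader).
tmp = tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False)
tmp.write("line one\nline two\n")
tmp.close()

print(RawReader(tmp.name).load())   # bytes blob, what the old deserializer produced
print(TextReader(tmp.name).load())  # ['line one', 'line two'], what datasource consumers expect
os.unlink(tmp.name)
```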
magenta__magenta-1793
Retraining Onsets and Frames Drums model with E-GMD dataset
Hello,

I am trying to retrain the OaF model with the E-GMD dataset for drums transcription. I first downloaded the E-GMD dataset, which has its corresponding CSV file and a directory for each drummer with subdirectories for the sessions. I am trying to do the first step following the code in ```onsets_frames_transcription_create_tfrecords```, which I found to be:

```
onsets_frames_transcription_create_tfrecords \
  --csv=".../e-gmd-v1.0.0/e-gmd-v1.0.0.csv" \
  --output_directory=".../e-gmd-v1.0.0" \
  --num_shards="0" \
  --wav_dir=".../e-gmd-v1.0.0" \
  --midi_dir=".../e-gmd-v1.0.0" \
  --expected_splits="train,validation,test"
```

But I got the following error, and I don't know where it comes from:

```
2020-08-05 17:23:45.289023: W tensorflow/stream_executor/platform/default/dso_loader.cc:59] Could not load dynamic library 'cudart64_101.dll'; dlerror: cudart64_101.dll not found
2020-08-05 17:23:45.289348: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
WARNING:tensorflow:From c:\users\carlos\anaconda3\lib\site-packages\tensorflow\python\compat\v2_compat.py:96: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
WARNING:tensorflow:From c:\users\carlos\anaconda3\lib\site-packages\tensorflow\python\compat\v2_compat.py:96: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
Traceback (most recent call last):
  File "c:\users\carlos\anaconda3\lib\runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "c:\users\carlos\anaconda3\lib\runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "C:\Users\Carlos\Anaconda3\Scripts\onsets_frames_transcription_create_tfrecords.exe\__main__.py", line 4, in <module>
ImportError: cannot import name 'console_entry_point'
```

I don't know if I have to change the paths of the wav and MIDI files so that the wav files are in one directory and the MIDI files in another, or whether the error comes from installation issues, versions, etc. I am using Windows 10.
[ { "content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Beam job for creating tfrecord files from datasets.\n\nExpects a CSV with the following fields: audio_filename, midi_filename, split\n\nUsage:\nonsets_frames_transcription_create_tfrecords \\\n --csv=\"/path/to/dataset.csv\" \\\n --output_directory=\"/path/to/output\" \\\n --num_shards=\"0\" \\\n --wav_dir=\"/path/to/dataset/audio\" \\\n --midi_dir=\"/path/to/dataset/midi\" \\\n --expected_splits=\"train,validation,test\"\n\n\"\"\"\n\nimport collections\nimport copy\nimport csv\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport apache_beam as beam\nfrom apache_beam.metrics import Metrics\nfrom magenta.models.onsets_frames_transcription import audio_label_data_utils\nfrom note_seq import midi_io\nfrom note_seq.protobuf import music_pb2\nimport tensorflow.compat.v1 as tf\n\ntf.disable_v2_behavior()\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('csv', None, 'Path to dataset CSV')\nflags.DEFINE_string('output_directory', None, 'Path to output_directory')\nflags.DEFINE_string('wav_dir', None, 'Directory for wav files.')\nflags.DEFINE_string('midi_dir', None, 'Directory for midi files.')\nflags.DEFINE_integer('num_shards', 0, 'number of output shards')\nflags.DEFINE_string('expected_splits', 'train,validation,test',\n 'Comma separated list of expected splits.')\nflags.DEFINE_boolean(\n 'add_wav_glob', False,\n 'If true, will add * to end of wav paths and use all matching files.')\nflags.DEFINE_list(\n 'pipeline_options', '--runner=DirectRunner',\n 'A comma-separated list of command line arguments to be used as options '\n 'for the Beam Pipeline.')\n\n\nclass CreateExampleDoFn(beam.DoFn):\n \"\"\"Splits wav and midi files for the dataset.\"\"\"\n\n def __init__(self, wav_dir, midi_dir, add_wav_glob,\n *unused_args, **unused_kwargs):\n self._wav_dir = wav_dir\n self._midi_dir = midi_dir\n self._add_wav_glob = add_wav_glob\n super(CreateExampleDoFn, self).__init__(*unused_args, **unused_kwargs)\n\n def process(self, paths):\n midi_path, wav_path_base = paths\n\n if self._add_wav_glob:\n wav_paths = tf.io.gfile.glob(wav_path_base + '*')\n else:\n wav_paths = [wav_path_base]\n\n if midi_path:\n base_ns = midi_io.midi_file_to_note_sequence(midi_path)\n base_ns.filename = midi_path\n else:\n base_ns = music_pb2.NoteSequence()\n\n for wav_path in wav_paths:\n logging.info('Creating Example %s:%s', midi_path, wav_path)\n wav_data = tf.io.gfile.GFile(wav_path, 'rb').read()\n\n ns = copy.deepcopy(base_ns)\n\n # Use base names.\n ns.id = '%s:%s' % (wav_path.replace(self._wav_dir, ''),\n midi_path.replace(self._midi_dir, ''))\n\n Metrics.counter('create_example', 'read_midi_wav').inc()\n\n example = audio_label_data_utils.create_example(ns.id, ns, wav_data)\n\n Metrics.counter('create_example', 'created_example').inc()\n yield example\n\n\ndef main(argv):\n del argv\n\n\n flags.mark_flags_as_required(['csv', 'output_directory'])\n\n 
tf.io.gfile.makedirs(FLAGS.output_directory)\n\n with tf.io.gfile.GFile(FLAGS.csv) as f:\n reader = csv.DictReader(f)\n\n splits = collections.defaultdict(list)\n for row in reader:\n splits[row['split']].append(\n (os.path.join(FLAGS.midi_dir, row['midi_filename']),\n os.path.join(FLAGS.wav_dir, row['audio_filename'])))\n\n if sorted(splits.keys()) != sorted(FLAGS.expected_splits.split(',')):\n raise ValueError('Got unexpected set of splits: %s' % list(splits.keys()))\n\n pipeline_options = beam.options.pipeline_options.PipelineOptions(\n FLAGS.pipeline_options)\n with beam.Pipeline(options=pipeline_options) as p:\n for split in splits:\n split_p = p | 'prepare_split_%s' % split >> beam.Create(splits[split])\n split_p |= 'create_examples_%s' % split >> beam.ParDo(\n CreateExampleDoFn(FLAGS.wav_dir, FLAGS.midi_dir, FLAGS.add_wav_glob))\n split_p |= 'write_%s' % split >> beam.io.WriteToTFRecord(\n os.path.join(FLAGS.output_directory, '%s.tfrecord' % split),\n coder=beam.coders.ProtoCoder(tf.train.Example),\n num_shards=FLAGS.num_shards)\n\n\nif __name__ == '__main__':\n app.run(main)\n", "path": "magenta/models/onsets_frames_transcription/onsets_frames_transcription_create_tfrecords.py" } ]
[ { "content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Beam job for creating tfrecord files from datasets.\n\nExpects a CSV with the following fields: audio_filename, midi_filename, split\n\nUsage:\nonsets_frames_transcription_create_tfrecords \\\n --csv=\"/path/to/dataset.csv\" \\\n --output_directory=\"/path/to/output\" \\\n --num_shards=\"0\" \\\n --wav_dir=\"/path/to/dataset/audio\" \\\n --midi_dir=\"/path/to/dataset/midi\" \\\n --expected_splits=\"train,validation,test\"\n\n\"\"\"\n\nimport collections\nimport copy\nimport csv\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport apache_beam as beam\nfrom apache_beam.metrics import Metrics\nfrom magenta.models.onsets_frames_transcription import audio_label_data_utils\nfrom note_seq import midi_io\nfrom note_seq.protobuf import music_pb2\nimport tensorflow.compat.v1 as tf\n\ntf.disable_v2_behavior()\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('csv', None, 'Path to dataset CSV')\nflags.DEFINE_string('output_directory', None, 'Path to output_directory')\nflags.DEFINE_string('wav_dir', None, 'Directory for wav files.')\nflags.DEFINE_string('midi_dir', None, 'Directory for midi files.')\nflags.DEFINE_integer('num_shards', 0, 'number of output shards')\nflags.DEFINE_string('expected_splits', 'train,validation,test',\n 'Comma separated list of expected splits.')\nflags.DEFINE_boolean(\n 'add_wav_glob', False,\n 'If true, will add * to end of wav paths and use all matching files.')\nflags.DEFINE_list(\n 'pipeline_options', '--runner=DirectRunner',\n 'A comma-separated list of command line arguments to be used as options '\n 'for the Beam Pipeline.')\n\n\nclass CreateExampleDoFn(beam.DoFn):\n \"\"\"Splits wav and midi files for the dataset.\"\"\"\n\n def __init__(self, wav_dir, midi_dir, add_wav_glob,\n *unused_args, **unused_kwargs):\n self._wav_dir = wav_dir\n self._midi_dir = midi_dir\n self._add_wav_glob = add_wav_glob\n super(CreateExampleDoFn, self).__init__(*unused_args, **unused_kwargs)\n\n def process(self, paths):\n midi_path, wav_path_base = paths\n\n if self._add_wav_glob:\n wav_paths = tf.io.gfile.glob(wav_path_base + '*')\n else:\n wav_paths = [wav_path_base]\n\n if midi_path:\n base_ns = midi_io.midi_file_to_note_sequence(midi_path)\n base_ns.filename = midi_path\n else:\n base_ns = music_pb2.NoteSequence()\n\n for wav_path in wav_paths:\n logging.info('Creating Example %s:%s', midi_path, wav_path)\n wav_data = tf.io.gfile.GFile(wav_path, 'rb').read()\n\n ns = copy.deepcopy(base_ns)\n\n # Use base names.\n ns.id = '%s:%s' % (wav_path.replace(self._wav_dir, ''),\n midi_path.replace(self._midi_dir, ''))\n\n Metrics.counter('create_example', 'read_midi_wav').inc()\n\n example = audio_label_data_utils.create_example(ns.id, ns, wav_data)\n\n Metrics.counter('create_example', 'created_example').inc()\n yield example\n\n\ndef main(argv):\n del argv\n\n\n flags.mark_flags_as_required(['csv', 'output_directory'])\n\n 
tf.io.gfile.makedirs(FLAGS.output_directory)\n\n with tf.io.gfile.GFile(FLAGS.csv) as f:\n reader = csv.DictReader(f)\n\n splits = collections.defaultdict(list)\n for row in reader:\n splits[row['split']].append(\n (os.path.join(FLAGS.midi_dir, row['midi_filename']),\n os.path.join(FLAGS.wav_dir, row['audio_filename'])))\n\n if sorted(splits.keys()) != sorted(FLAGS.expected_splits.split(',')):\n raise ValueError('Got unexpected set of splits: %s' % list(splits.keys()))\n\n pipeline_options = beam.options.pipeline_options.PipelineOptions(\n FLAGS.pipeline_options)\n with beam.Pipeline(options=pipeline_options) as p:\n for split in splits:\n split_p = p | 'prepare_split_%s' % split >> beam.Create(splits[split])\n split_p |= 'create_examples_%s' % split >> beam.ParDo(\n CreateExampleDoFn(FLAGS.wav_dir, FLAGS.midi_dir, FLAGS.add_wav_glob))\n split_p |= 'write_%s' % split >> beam.io.WriteToTFRecord(\n os.path.join(FLAGS.output_directory, '%s.tfrecord' % split),\n coder=beam.coders.ProtoCoder(tf.train.Example),\n num_shards=FLAGS.num_shards)\n\n\ndef console_entry_point():\n tf.disable_v2_behavior()\n app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/onsets_frames_transcription/onsets_frames_transcription_create_tfrecords.py" } ]
diff --git a/magenta/models/onsets_frames_transcription/onsets_frames_transcription_create_tfrecords.py b/magenta/models/onsets_frames_transcription/onsets_frames_transcription_create_tfrecords.py index cbc3715402..3b339cc231 100644 --- a/magenta/models/onsets_frames_transcription/onsets_frames_transcription_create_tfrecords.py +++ b/magenta/models/onsets_frames_transcription/onsets_frames_transcription_create_tfrecords.py @@ -139,5 +139,10 @@ def main(argv): num_shards=FLAGS.num_shards) -if __name__ == '__main__': +def console_entry_point(): + tf.disable_v2_behavior() app.run(main) + + +if __name__ == '__main__': + console_entry_point()
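The patch in this record moves `app.run(main)` into a `console_entry_point()` wrapper so that one-time setup (`tf.disable_v2_behavior()`) runs before Abseil parses flags, whether the script is executed directly or through a packaged console script. The sketch below illustrates that wrapper pattern in isolation; the flag name and the setup step are placeholders, not the Magenta code.

```python
from absl import app
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('csv', None, 'Path to dataset CSV (illustrative flag).')


def main(argv):
    del argv  # absl passes the residual argv; this example does not use it
    if FLAGS.csv is None:
        raise app.UsageError('--csv is required')
    print('would process', FLAGS.csv)


def console_entry_point():
    # One-time global setup belongs here (the original calls
    # tf.disable_v2_behavior()) so it runs both when the file is executed as
    # a script and when a setuptools console_scripts entry point targets it.
    app.run(main)


if __name__ == '__main__':
    console_entry_point()
```

A `console_scripts` entry point would then reference `package.module:console_entry_point` rather than `main`.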
django-wiki__django-wiki-384
Downloading attachments with non-ascii characters is broken with Python 2 ``` UnicodeDecodeError at /12/plugin/attachments/download/33/ 'ascii' codec can't decode byte 0xc3 in position 0: ordinal not in range(128) Request Method: GET Request URL: http://localhost:8000/12/plugin/attachments/download/33/ Django Version: 1.7.4 Exception Type: UnicodeDecodeError Exception Value: 'ascii' codec can't decode byte 0xc3 in position 0: ordinal not in range(128) Exception Location: /home/christian/Projects/Python/django-wiki/testproject/wiki/core/http.py in send_file, line 50 Python Executable: /home/christian/.virtualenvs/wiki-py2/bin/python ```
[ { "content": "from __future__ import unicode_literals\nfrom __future__ import absolute_import\nimport os\nimport mimetypes\nfrom datetime import datetime\n\nfrom django.http import HttpResponse\nfrom django.utils.http import http_date\nfrom django.utils import dateformat\n\nfrom wiki.conf import settings\n\ndef django_sendfile_response(request, filepath):\n from sendfile import sendfile\n return sendfile(request, filepath)\n\n\ndef send_file(request, filepath, last_modified=None, filename=None):\n fullpath = filepath\n # Respect the If-Modified-Since header.\n statobj = os.stat(fullpath)\n if filename:\n mimetype, encoding = mimetypes.guess_type(filename)\n else:\n mimetype, encoding = mimetypes.guess_type(fullpath)\n \n mimetype = mimetype or 'application/octet-stream'\n \n if settings.USE_SENDFILE:\n response = django_sendfile_response(request, filepath)\n else:\n response = HttpResponse(open(fullpath, 'rb').read(), mimetype=mimetype)\n \n if not last_modified:\n response[\"Last-Modified\"] = http_date(statobj.st_mtime)\n else:\n if isinstance(last_modified, datetime):\n last_modified = float(dateformat.format(last_modified, 'U'))\n response[\"Last-Modified\"] = http_date(epoch_seconds=last_modified)\n \n response[\"Content-Length\"] = statobj.st_size\n \n if encoding:\n response[\"Content-Encoding\"] = encoding\n \n # TODO: Escape filename\n if filename:\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % filename.encode('utf-8')\n \n return response\n", "path": "wiki/core/http.py" } ]
[ { "content": "from __future__ import unicode_literals\nfrom __future__ import absolute_import\nimport os\nimport mimetypes\nfrom datetime import datetime\n\nfrom django.http import HttpResponse\nfrom django.utils.http import http_date\nfrom django.utils import dateformat\n\nfrom wiki.conf import settings\n\ndef django_sendfile_response(request, filepath):\n from sendfile import sendfile\n return sendfile(request, filepath)\n\n\ndef send_file(request, filepath, last_modified=None, filename=None):\n fullpath = filepath\n # Respect the If-Modified-Since header.\n statobj = os.stat(fullpath)\n if filename:\n mimetype, encoding = mimetypes.guess_type(filename)\n else:\n mimetype, encoding = mimetypes.guess_type(fullpath)\n \n mimetype = mimetype or 'application/octet-stream'\n \n if settings.USE_SENDFILE:\n response = django_sendfile_response(request, filepath)\n else:\n response = HttpResponse(open(fullpath, 'rb').read(), mimetype=mimetype)\n \n if not last_modified:\n response[\"Last-Modified\"] = http_date(statobj.st_mtime)\n else:\n if isinstance(last_modified, datetime):\n last_modified = float(dateformat.format(last_modified, 'U'))\n response[\"Last-Modified\"] = http_date(epoch_seconds=last_modified)\n \n response[\"Content-Length\"] = statobj.st_size\n \n if encoding:\n response[\"Content-Encoding\"] = encoding\n \n # TODO: Escape filename\n if filename:\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % filename\n \n return response\n", "path": "wiki/core/http.py" } ]
diff --git a/wiki/core/http.py b/wiki/core/http.py index e7cc3f0d8..0cb598bbf 100644 --- a/wiki/core/http.py +++ b/wiki/core/http.py @@ -45,6 +45,6 @@ def send_file(request, filepath, last_modified=None, filename=None): # TODO: Escape filename if filename: - response["Content-Disposition"] = "attachment; filename=%s" % filename.encode('utf-8') + response["Content-Disposition"] = "attachment; filename=%s" % filename return response
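The fix above keeps the filename as text instead of pre-encoding it to UTF-8 bytes, which is what triggered the implicit ASCII decode under Python 2. For genuinely non-ASCII names a response can also carry an RFC 5987 `filename*` parameter; the Python 3 sketch below shows that approach as a general illustration, not as the change django-wiki made.

```python
from urllib.parse import quote


def content_disposition(filename):
    # Plain ASCII fallback for old clients plus an RFC 5987 filename*
    # parameter that percent-encodes the UTF-8 bytes of the real name.
    fallback = filename.encode('ascii', 'replace').decode('ascii')
    return "attachment; filename=\"{0}\"; filename*=UTF-8''{1}".format(
        fallback, quote(filename))


if __name__ == '__main__':
    print(content_disposition('résumé.pdf'))
    # attachment; filename="r?sum?.pdf"; filename*=UTF-8''r%C3%A9sum%C3%A9.pdf
```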
xonsh__xonsh-2332
xoreutils: echo fails with KeyError: 'help' Any `echo` invocation fails: ```shell $ $XONSH_SHOW_TRACEBACK = True $ echo xonsh: To log full traceback to a file set: $XONSH_TRACEBACK_LOGFILE = <filename> Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 13061, in run r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec) File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 12896, in proxy_four return f(args, stdin, stdout, stderr) File "/usr/local/lib/python3.6/site-packages/xonsh/xoreutils/echo.py", line 9, in echo if opts['help']: KeyError: 'help' $ echo foo xonsh: To log full traceback to a file set: $XONSH_TRACEBACK_LOGFILE = <filename> Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 13061, in run r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec) File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 12896, in proxy_four return f(args, stdin, stdout, stderr) File "/usr/local/lib/python3.6/site-packages/xonsh/xoreutils/echo.py", line 9, in echo if opts['help']: KeyError: 'help' $ echo "foo" xonsh: To log full traceback to a file set: $XONSH_TRACEBACK_LOGFILE = <filename> Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 13061, in run r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec) File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 12896, in proxy_four return f(args, stdin, stdout, stderr) File "/usr/local/lib/python3.6/site-packages/xonsh/xoreutils/echo.py", line 9, in echo if opts['help']: KeyError: 'help' ``` Obviously, the problem is that `help` is looked up but missing: http://xon.sh/_modules/xonsh/xoreutils/echo.html#echo
[ { "content": "\"\"\"Implements a simple echo command for xonsh.\"\"\"\n\n\ndef echo(args, stdin, stdout, stderr):\n \"\"\"A simple echo command.\"\"\"\n opts = _echo_parse_args(args)\n if opts is None:\n return\n if opts['help']:\n print(ECHO_HELP, file=stdout)\n return 0\n ender = opts['end']\n args = map(str, args)\n if opts['escapes']:\n args = map(lambda x: x.encode().decode('unicode_escape'), args)\n print(*args, end=ender, file=stdout)\n\n\ndef _echo_parse_args(args):\n out = {'escapes': False, 'end': '\\n'}\n if '-e' in args:\n args.remove('-e')\n out['escapes'] = True\n if '-E' in args:\n args.remove('-E')\n out['escapes'] = False\n if '-n' in args:\n args.remove('-n')\n out['end'] = ''\n if '-h' in args or '--help' in args:\n out['help'] = True\n return out\n\n\nECHO_HELP = \"\"\"Usage: echo [OPTIONS]... [STRING]...\nEcho the STRING(s) to standard output.\n\n -n do not include the trailing newline\n -e enable interpretation of backslash escapes\n -E disable interpretation of backslash escapes (default)\n -h --help display this message and exit\n\nThis version of echo was written in Python for the xonsh project: http://xon.sh\nBased on echo from GNU coreutils: http://www.gnu.org/software/coreutils/\"\"\"\n", "path": "xonsh/xoreutils/echo.py" } ]
[ { "content": "\"\"\"Implements a simple echo command for xonsh.\"\"\"\n\n\ndef echo(args, stdin, stdout, stderr):\n \"\"\"A simple echo command.\"\"\"\n opts = _echo_parse_args(args)\n if opts is None:\n return\n if opts['help']:\n print(ECHO_HELP, file=stdout)\n return 0\n ender = opts['end']\n args = map(str, args)\n if opts['escapes']:\n args = map(lambda x: x.encode().decode('unicode_escape'), args)\n print(*args, end=ender, file=stdout)\n\n\ndef _echo_parse_args(args):\n out = {'escapes': False, 'end': '\\n', 'help': False}\n if '-e' in args:\n args.remove('-e')\n out['escapes'] = True\n if '-E' in args:\n args.remove('-E')\n out['escapes'] = False\n if '-n' in args:\n args.remove('-n')\n out['end'] = ''\n if '-h' in args or '--help' in args:\n out['help'] = True\n return out\n\n\nECHO_HELP = \"\"\"Usage: echo [OPTIONS]... [STRING]...\nEcho the STRING(s) to standard output.\n\n -n do not include the trailing newline\n -e enable interpretation of backslash escapes\n -E disable interpretation of backslash escapes (default)\n -h --help display this message and exit\n\nThis version of echo was written in Python for the xonsh project: http://xon.sh\nBased on echo from GNU coreutils: http://www.gnu.org/software/coreutils/\"\"\"\n", "path": "xonsh/xoreutils/echo.py" } ]
diff --git a/news/fix-echo.rst b/news/fix-echo.rst new file mode 100644 index 0000000000..18e419918e --- /dev/null +++ b/news/fix-echo.rst @@ -0,0 +1,13 @@ +**Added:** None + +**Changed:** None + +**Deprecated:** None + +**Removed:** None + +**Fixed:** + +* Fix echo command from xoreutils. + +**Security:** None diff --git a/xonsh/xoreutils/echo.py b/xonsh/xoreutils/echo.py index 0becc6618d..5c0a2f5fda 100644 --- a/xonsh/xoreutils/echo.py +++ b/xonsh/xoreutils/echo.py @@ -17,7 +17,7 @@ def echo(args, stdin, stdout, stderr): def _echo_parse_args(args): - out = {'escapes': False, 'end': '\n'} + out = {'escapes': False, 'end': '\n', 'help': False} if '-e' in args: args.remove('-e') out['escapes'] = True
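The one-line fix seeds `'help': False` in the defaults dict so the later lookup cannot raise. A belt-and-braces variant of the same idea is to keep every key the caller reads in the initial dict and to use `dict.get` at the call site; the snippet below is a simplified stand-in for the xonsh command, not its actual implementation.

```python
def parse_echo_args(args):
    # Every key the caller reads is seeded up front, so a forgotten flag can
    # never resurface later as a KeyError.
    opts = {'escapes': False, 'end': '\n', 'help': False}
    if '-n' in args:
        args.remove('-n')
        opts['end'] = ''
    if '-h' in args or '--help' in args:
        opts['help'] = True
    return opts


def echo(args):
    opts = parse_echo_args(args)
    if opts.get('help'):  # .get() acts as a second line of defence
        return 'usage: echo [-n] [STRING]...'
    return ' '.join(map(str, args)) + opts['end']


if __name__ == '__main__':
    print(echo(['-n', 'hello', 'world']), end='')
```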
databricks__koalas-105
How to call dev/_make_missing_functions.py? ``` rxin @ C02XT0W6JGH5 : ~/workspace/spark-pandas (master) > dev/_make_missing_functions.py Traceback (most recent call last): File "dev/_make_missing_functions.py", line 22, in <module> from databricks.koala.frame import PandasLikeDataFrame ImportError: No module named databricks.koala.frame ``` Do I need to install koala first? We should add documentation to CONTRIBUTING.md. It'd also be best if this runs against the existing code base, rather than a system-wide installed Koala.
[ { "content": "#!/usr/bin/env python\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport inspect\n\nimport pandas as pd\n\nfrom databricks.koalas.frame import PandasLikeDataFrame\nfrom databricks.koalas.missing.frame import _MissingPandasLikeDataFrame\nfrom databricks.koalas.missing.series import _MissingPandasLikeSeries\nfrom databricks.koalas.series import PandasLikeSeries\n\n\nINDENT_LEN = 4\nLINE_LEN_LIMIT = 100\n\n\ndef inspect_missing_functions(original_type, target_type, missing_type):\n \"\"\"\n Find functions which exist in original_type but not in target_type,\n or the signature is modified.\n\n :return: the tuple of the missing function name and its signature,\n and the name of the functions the signature of which is different\n and its original and modified signature.\n \"\"\"\n missing = []\n modified = []\n\n already_in_missing = set([(name, inspect.signature(func)) for name, func\n in inspect.getmembers(missing_type, inspect.isfunction)])\n for name, func in inspect.getmembers(original_type, inspect.isfunction):\n # Skip the private attributes\n if name.startswith('_'):\n continue\n\n original_signature = inspect.signature(func, follow_wrapped=True)\n\n if hasattr(target_type, name):\n f = getattr(target_type, name)\n if inspect.isfunction(f):\n target_signature = inspect.signature(f)\n if (name, target_signature) in already_in_missing:\n missing.append((name, original_signature))\n elif str(original_signature) != str(target_signature):\n modified.append((name, original_signature, target_signature))\n continue\n\n missing.append((name, original_signature))\n\n return missing, modified\n\n\ndef format_arguments(arguments, prefix_len, suffix_len):\n \"\"\"Format arguments not to break pydocstyle.\n\n :param arguments: the argument list\n :param prefix_len: the prefix length when the argument string needs line break\n :param suffix_len: the suffix length to check the line length exceeds the limit\n :return: the formatted argument string\n \"\"\"\n lines = ['']\n\n def append_arg(arg):\n if prefix_len + len(lines[-1]) + len(', ') + len(arg) + suffix_len > LINE_LEN_LIMIT:\n lines.append('')\n append_arg(arg)\n else:\n if len(lines[-1]) > 0:\n arg = ', {}'.format(arg)\n lines[-1] += arg\n\n for arg in arguments:\n append_arg(arg)\n\n return (',\\n' + (' ' * prefix_len)).join(lines)\n\n\ndef format_method_arguments(name, signature):\n \"\"\"Format the method arguments from its name and signature.\n\n :return: the formatted argument string\n \"\"\"\n arguments = []\n\n for param in signature.parameters.values():\n if param.default is not inspect.Signature.empty and isinstance(param.default, type):\n arguments.append('{}={}'.format(param.name, param.default.__name__))\n elif param.default is not inspect.Signature.empty and repr(param.default) == 'nan':\n arguments.append('{}={}'.format(param.name, 'np.nan'))\n else:\n arguments.append(str(param))\n\n prefix_len = INDENT_LEN + len('def {}('.format(name))\n suffix_len = len('):')\n 
return format_arguments(arguments, prefix_len, suffix_len)\n\n\ndef format_derived_from(original_type, unavailable_arguments, signature):\n \"\"\"Format `@derived_from` decorator.\n\n :param original_type: the original type to be derived\n :param unavailable_arguments: the arguments Koalas does not support yet\n :param signature: the method signature\n :return: the formatted `@derived_from` decorator\n \"\"\"\n if len(unavailable_arguments) == 0:\n return '@derived_from(pd.{})'.format(original_type.__name__)\n\n arguments = []\n\n for arg in unavailable_arguments:\n param = signature.parameters[arg]\n if param.default == inspect.Parameter.empty or \\\n param.kind == inspect.Parameter.VAR_POSITIONAL or \\\n param.kind == inspect.Parameter.VAR_KEYWORD:\n continue\n arguments.append(repr(arg))\n\n prefix = '@derived_from(pd.{}, ua_args=['.format(original_type.__name__)\n suffix = '])'\n prefix_len = INDENT_LEN + len(prefix)\n suffix_len = len(suffix)\n return '{}{}{}'.format(prefix, format_arguments(arguments, prefix_len, suffix_len), suffix)\n\n\ndef format_raise_errors(original_type, name, unavailable_arguments, signature):\n \"\"\"\n Format raise error statements for unavailable arguments when specified the different value\n from the default value.\n\n :return: the formatted raise error statements\n \"\"\"\n raise_errors = ''\n\n for arg in unavailable_arguments:\n param = signature.parameters[arg]\n if param.default == inspect.Parameter.empty or \\\n param.kind == inspect.Parameter.VAR_POSITIONAL or \\\n param.kind == inspect.Parameter.VAR_KEYWORD:\n continue\n if repr(param.default) == 'nan':\n not_equal = 'not np.isnan({})'.format(arg)\n elif isinstance(param.default, type):\n not_equal = '{} is not {}'.format(arg, param.default.__name__)\n elif param.default is None or \\\n param.default is True or param.default is False:\n not_equal = '{} is not {}'.format(arg, repr(param.default))\n else:\n not_equal = '{} != {}'.format(arg, repr(param.default))\n\n raise_error_prefix = 'raise PandasNotImplementedError('\n raise_error_suffix = ')'\n arguments = format_arguments(\n arguments=[\"class_name='pd.{}'\".format(original_type.__name__),\n \"method_name='{}'\".format(name),\n \"arg_name='{}'\".format(arg)],\n prefix_len=(INDENT_LEN * 3 + len(raise_error_prefix)),\n suffix_len=len(raise_error_suffix))\n raise_errors += (\"\"\"\n if {0}:\n {1}{2}{3}\"\"\".format(not_equal, raise_error_prefix, arguments, raise_error_suffix))\n\n return raise_errors\n\n\ndef make_misssing_function(original_type, name, signature):\n \"\"\"Make a missing functions stub.\n\n :return: the stub definition for the missing function\n \"\"\"\n arguments = format_method_arguments(name, signature)\n error_argument = format_arguments(\n arguments=[\"class_name='pd.{}'\".format(original_type.__name__),\n \"method_name='{}'\".format(name)],\n prefix_len=(8 + len('raise PandasNotImplementedError(')),\n suffix_len=len(')'))\n\n return (\"\"\"\n def {0}({1}):\n \\\"\"\"A stub for the equivalent method to `pd.{2}.{0}()`.\n\n The method `pd.{2}.{0}()` is not implemented yet.\n \\\"\"\"\n raise PandasNotImplementedError({3})\"\"\"\n .format(name, arguments, original_type.__name__, error_argument))\n\n\ndef make_modified_function_def(original_type, name, original, target):\n \"\"\"Make the modified function definition.\n\n :return: the definition for the modified function\n \"\"\"\n arguments = format_method_arguments(name, original)\n argument_names = set(target.parameters)\n unavailable_arguments = [p for p in original.parameters 
if p not in argument_names]\n derived_from = format_derived_from(original_type, unavailable_arguments, original)\n raise_error = format_raise_errors(original_type, name, unavailable_arguments, original)\n return (\"\"\"\n {0}\n def {1}({2}):{3}\"\"\".format(derived_from, name, arguments, raise_error))\n\n\ndef _main():\n for original_type, target_type, missing_type in \\\n [(pd.DataFrame, PandasLikeDataFrame, _MissingPandasLikeDataFrame),\n (pd.Series, PandasLikeSeries, _MissingPandasLikeSeries)]:\n missing, modified = inspect_missing_functions(original_type, target_type, missing_type)\n\n print('MISSING functions for {}'.format(original_type.__name__))\n for name, signature in missing:\n # print(make_misssing_function(original_type, name, signature))\n print(\"\"\" {0} = unsupported_function('{0}')\"\"\".format(name))\n\n print()\n print('MODIFIED functions for {}'.format(original_type.__name__))\n for name, original, target in modified:\n print(make_modified_function_def(original_type, name, original, target))\n\n\nif __name__ == '__main__':\n _main()\n", "path": "dev/_make_missing_functions.py" } ]
[ { "content": "#!/usr/bin/env python\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA script to generate the missing function stubs. Before running this,\nmake sure you install koalas from the current checkout by running:\npip install -e .\n\"\"\"\n\nimport inspect\n\nimport pandas as pd\n\nfrom databricks.koalas.frame import PandasLikeDataFrame\nfrom databricks.koalas.missing.frame import _MissingPandasLikeDataFrame\nfrom databricks.koalas.missing.series import _MissingPandasLikeSeries\nfrom databricks.koalas.series import PandasLikeSeries\n\n\nINDENT_LEN = 4\nLINE_LEN_LIMIT = 100\n\n\ndef inspect_missing_functions(original_type, target_type, missing_type):\n \"\"\"\n Find functions which exist in original_type but not in target_type,\n or the signature is modified.\n\n :return: the tuple of the missing function name and its signature,\n and the name of the functions the signature of which is different\n and its original and modified signature.\n \"\"\"\n missing = []\n modified = []\n\n already_in_missing = set([(name, inspect.signature(func)) for name, func\n in inspect.getmembers(missing_type, inspect.isfunction)])\n for name, func in inspect.getmembers(original_type, inspect.isfunction):\n # Skip the private attributes\n if name.startswith('_'):\n continue\n\n original_signature = inspect.signature(func, follow_wrapped=True)\n\n if hasattr(target_type, name):\n f = getattr(target_type, name)\n if inspect.isfunction(f):\n target_signature = inspect.signature(f)\n if (name, target_signature) in already_in_missing:\n missing.append((name, original_signature))\n elif str(original_signature) != str(target_signature):\n modified.append((name, original_signature, target_signature))\n continue\n\n missing.append((name, original_signature))\n\n return missing, modified\n\n\ndef format_arguments(arguments, prefix_len, suffix_len):\n \"\"\"Format arguments not to break pydocstyle.\n\n :param arguments: the argument list\n :param prefix_len: the prefix length when the argument string needs line break\n :param suffix_len: the suffix length to check the line length exceeds the limit\n :return: the formatted argument string\n \"\"\"\n lines = ['']\n\n def append_arg(arg):\n if prefix_len + len(lines[-1]) + len(', ') + len(arg) + suffix_len > LINE_LEN_LIMIT:\n lines.append('')\n append_arg(arg)\n else:\n if len(lines[-1]) > 0:\n arg = ', {}'.format(arg)\n lines[-1] += arg\n\n for arg in arguments:\n append_arg(arg)\n\n return (',\\n' + (' ' * prefix_len)).join(lines)\n\n\ndef format_method_arguments(name, signature):\n \"\"\"Format the method arguments from its name and signature.\n\n :return: the formatted argument string\n \"\"\"\n arguments = []\n\n for param in signature.parameters.values():\n if param.default is not inspect.Signature.empty and isinstance(param.default, type):\n arguments.append('{}={}'.format(param.name, param.default.__name__))\n elif param.default is not inspect.Signature.empty and repr(param.default) == 'nan':\n 
arguments.append('{}={}'.format(param.name, 'np.nan'))\n else:\n arguments.append(str(param))\n\n prefix_len = INDENT_LEN + len('def {}('.format(name))\n suffix_len = len('):')\n return format_arguments(arguments, prefix_len, suffix_len)\n\n\ndef format_derived_from(original_type, unavailable_arguments, signature):\n \"\"\"Format `@derived_from` decorator.\n\n :param original_type: the original type to be derived\n :param unavailable_arguments: the arguments Koalas does not support yet\n :param signature: the method signature\n :return: the formatted `@derived_from` decorator\n \"\"\"\n if len(unavailable_arguments) == 0:\n return '@derived_from(pd.{})'.format(original_type.__name__)\n\n arguments = []\n\n for arg in unavailable_arguments:\n param = signature.parameters[arg]\n if param.default == inspect.Parameter.empty or \\\n param.kind == inspect.Parameter.VAR_POSITIONAL or \\\n param.kind == inspect.Parameter.VAR_KEYWORD:\n continue\n arguments.append(repr(arg))\n\n prefix = '@derived_from(pd.{}, ua_args=['.format(original_type.__name__)\n suffix = '])'\n prefix_len = INDENT_LEN + len(prefix)\n suffix_len = len(suffix)\n return '{}{}{}'.format(prefix, format_arguments(arguments, prefix_len, suffix_len), suffix)\n\n\ndef format_raise_errors(original_type, name, unavailable_arguments, signature):\n \"\"\"\n Format raise error statements for unavailable arguments when specified the different value\n from the default value.\n\n :return: the formatted raise error statements\n \"\"\"\n raise_errors = ''\n\n for arg in unavailable_arguments:\n param = signature.parameters[arg]\n if param.default == inspect.Parameter.empty or \\\n param.kind == inspect.Parameter.VAR_POSITIONAL or \\\n param.kind == inspect.Parameter.VAR_KEYWORD:\n continue\n if repr(param.default) == 'nan':\n not_equal = 'not np.isnan({})'.format(arg)\n elif isinstance(param.default, type):\n not_equal = '{} is not {}'.format(arg, param.default.__name__)\n elif param.default is None or \\\n param.default is True or param.default is False:\n not_equal = '{} is not {}'.format(arg, repr(param.default))\n else:\n not_equal = '{} != {}'.format(arg, repr(param.default))\n\n raise_error_prefix = 'raise PandasNotImplementedError('\n raise_error_suffix = ')'\n arguments = format_arguments(\n arguments=[\"class_name='pd.{}'\".format(original_type.__name__),\n \"method_name='{}'\".format(name),\n \"arg_name='{}'\".format(arg)],\n prefix_len=(INDENT_LEN * 3 + len(raise_error_prefix)),\n suffix_len=len(raise_error_suffix))\n raise_errors += (\"\"\"\n if {0}:\n {1}{2}{3}\"\"\".format(not_equal, raise_error_prefix, arguments, raise_error_suffix))\n\n return raise_errors\n\n\ndef make_misssing_function(original_type, name, signature):\n \"\"\"Make a missing functions stub.\n\n :return: the stub definition for the missing function\n \"\"\"\n arguments = format_method_arguments(name, signature)\n error_argument = format_arguments(\n arguments=[\"class_name='pd.{}'\".format(original_type.__name__),\n \"method_name='{}'\".format(name)],\n prefix_len=(8 + len('raise PandasNotImplementedError(')),\n suffix_len=len(')'))\n\n return (\"\"\"\n def {0}({1}):\n \\\"\"\"A stub for the equivalent method to `pd.{2}.{0}()`.\n\n The method `pd.{2}.{0}()` is not implemented yet.\n \\\"\"\"\n raise PandasNotImplementedError({3})\"\"\"\n .format(name, arguments, original_type.__name__, error_argument))\n\n\ndef make_modified_function_def(original_type, name, original, target):\n \"\"\"Make the modified function definition.\n\n :return: the definition for the 
modified function\n \"\"\"\n arguments = format_method_arguments(name, original)\n argument_names = set(target.parameters)\n unavailable_arguments = [p for p in original.parameters if p not in argument_names]\n derived_from = format_derived_from(original_type, unavailable_arguments, original)\n raise_error = format_raise_errors(original_type, name, unavailable_arguments, original)\n return (\"\"\"\n {0}\n def {1}({2}):{3}\"\"\".format(derived_from, name, arguments, raise_error))\n\n\ndef _main():\n for original_type, target_type, missing_type in \\\n [(pd.DataFrame, PandasLikeDataFrame, _MissingPandasLikeDataFrame),\n (pd.Series, PandasLikeSeries, _MissingPandasLikeSeries)]:\n missing, modified = inspect_missing_functions(original_type, target_type, missing_type)\n\n print('MISSING functions for {}'.format(original_type.__name__))\n for name, signature in missing:\n # print(make_misssing_function(original_type, name, signature))\n print(\"\"\" {0} = unsupported_function('{0}')\"\"\".format(name))\n\n print()\n print('MODIFIED functions for {}'.format(original_type.__name__))\n for name, original, target in modified:\n print(make_modified_function_def(original_type, name, original, target))\n\n\nif __name__ == '__main__':\n _main()\n", "path": "dev/_make_missing_functions.py" } ]
diff --git a/dev/_make_missing_functions.py b/dev/_make_missing_functions.py index 8df7b01149..5351d53bde 100755 --- a/dev/_make_missing_functions.py +++ b/dev/_make_missing_functions.py @@ -15,6 +15,12 @@ # limitations under the License. # +""" +A script to generate the missing function stubs. Before running this, +make sure you install koalas from the current checkout by running: +pip install -e . +""" + import inspect import pandas as pd
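The eventual patch answers the question by documenting that the script expects an editable install (`pip install -e .`) of the current checkout. Another common pattern for dev scripts is to fall back to the repository checkout when the package is not importable; the path arithmetic below assumes the script lives one directory below the repo root (as `dev/` does here) and is only a sketch, not part of the koalas change.

```python
import os
import sys

try:
    import databricks.koalas  # noqa: F401  (installed, e.g. via `pip install -e .`)
except ImportError:
    # Not installed: put the checkout itself on sys.path so the remaining
    # imports in the script resolve against the working tree instead.
    repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
    sys.path.insert(0, repo_root)
```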
django-json-api__django-rest-framework-json-api-440
Test with tox in CI 2.3.0 is currently broken unless a user's environment happens to have `six` installed. If the CI tests ran at least one test that minimally installs the package and runs some basic tests to import things, then that kind of problem would be avoided.
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_mock = sys.version_info < (3, 3)\nmock = ['mock'] if needs_mock else []\nneeds_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if needs_pytest else []\nneeds_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)\nsphinx = ['sphinx'] if needs_sphinx else []\nneeds_wheel = {'bdist_wheel'}.intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), 'r') as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(\n get_version('rest_framework_json_api')))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='djangorestframework-jsonapi',\n version=get_version('rest_framework_json_api'),\n url='https://github.com/django-json-api/django-rest-framework-json-api',\n license='MIT',\n description='A Django REST framework API adapter for the JSON API spec.',\n long_description=read('README.rst'),\n author='Jerel Unruh',\n author_email='',\n packages=get_packages('rest_framework_json_api'),\n package_data=get_package_data('rest_framework_json_api'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'inflection>=0.3.0',\n 'djangorestframework>=3.6.3',\n 'django>=1.11',\n 'six',\n ],\n setup_requires=pytest_runner + sphinx + wheel,\n tests_require=[\n 'pytest-factoryboy',\n 'factory-boy',\n 'pytest-django',\n 'pytest',\n 'django-polymorphic>=2.0',\n 'packaging',\n 'django-debug-toolbar'\n ] + mock,\n zip_safe=False,\n)\n", "path": 
"setup.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_mock = sys.version_info < (3, 3)\nmock = ['mock'] if needs_mock else []\nneeds_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if needs_pytest else []\nneeds_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)\nsphinx = ['sphinx'] if needs_sphinx else []\nneeds_wheel = {'bdist_wheel'}.intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), 'r') as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(\n get_version('rest_framework_json_api')))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='djangorestframework-jsonapi',\n version=get_version('rest_framework_json_api'),\n url='https://github.com/django-json-api/django-rest-framework-json-api',\n license='MIT',\n description='A Django REST framework API adapter for the JSON API spec.',\n long_description=read('README.rst'),\n author='Jerel Unruh',\n author_email='',\n packages=get_packages('rest_framework_json_api'),\n package_data=get_package_data('rest_framework_json_api'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'inflection>=0.3.0',\n 'djangorestframework>=3.6.3',\n 'django>=1.11',\n 'six',\n ],\n setup_requires=pytest_runner + sphinx + wheel,\n tests_require=[\n 'pytest-factoryboy',\n 'factory-boy',\n 'pytest-django',\n 'pytest',\n 'pytest-cov',\n 'django-polymorphic>=2.0',\n 'packaging',\n 'django-debug-toolbar'\n ] + mock,\n 
zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/.gitignore b/.gitignore index ac41fdd8..1207cc48 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,10 @@ pip-delete-this-directory.txt .idea/ # PyTest cache -.cache/ +.pytest_cache/ + +# Coverage +.coverage # Tox .tox/ diff --git a/.travis.yml b/.travis.yml index 2a30c0c5..09cd5729 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,3 @@ ---- language: python sudo: false cache: pip @@ -6,45 +5,38 @@ cache: pip matrix: include: - python: 2.7 - env: DJANGO=">=1.11,<2.0" DRF=">=3.6.3,<3.7" + env: TOXENV=py27-django111-drf36 - python: 2.7 - env: DJANGO=">=1.11,<2.0" DRF=">=3.7.0,<3.8" + env: TOXENV=py27-django111-drf37 - python: 3.4 - env: DJANGO=">=1.11,<2.0" DRF=">=3.6.3,<3.7" + env: TOXENV=py34-django111-drf36 - python: 3.4 - env: DJANGO=">=1.11,<2.0" DRF=">=3.7.0,<3.8" + env: TOXENV=py34-django111-drf37 - python: 3.4 - env: DJANGO=">=2.0,<2.1" DRF=">=3.7.0,<3.8" + env: TOXENV=py34-django20-drf37 - python: 3.5 - env: DJANGO=">=1.11,<2.0" DRF=">=3.6.3,<3.7" + env: TOXENV=py35-django111-drf36 - python: 3.5 - env: DJANGO=">=1.11,<2.0" DRF=">=3.7.0,<3.8" + env: TOXENV=py35-django111-drf37 - python: 3.5 - env: DJANGO=">=2.0,<2.1" DRF=">=3.7.0,<3.8" + env: TOXENV=py35-django20-drf37 - python: 3.6 - env: DJANGO=">=1.11,<2.0" DRF=">=3.6.3,<3.7" + env: TOXENV=py36-django111-drf36 - python: 3.6 - env: DJANGO=">=1.11,<2.0" DRF=">=3.7.0,<3.8" + env: TOXENV=py36-django111-drf37 - python: 3.6 - env: DJANGO=">=2.0,<2.1" DRF=">=3.7.0,<3.8" -before_install: - # Force an upgrade of py & pytest to avoid VersionConflict - - pip install --upgrade py - # Faker requires a newer pytest - - pip install "pytest>3.3" - - pip install codecov flake8 isort + env: TOXENV=py36-django20-drf37 + - python: 3.6 + env: TOXENV=flake8 + - python: 3.6 + env: TOXENV=isort install: - - pip install Django${DJANGO} djangorestframework${DRF} - - python setup.py install + - pip install tox script: - - flake8 - - isort --check-only --verbose --recursive --diff rest_framework_json_api - # example has extra dependencies that are installed in a dev environment - # but are not installed in CI. Explicitly set those packages. 
- - isort --check-only --verbose --recursive --diff --thirdparty pytest --thirdparty polymorphic --thirdparty pytest_factoryboy --thirdparty packaging example - - coverage run setup.py -v test + - tox after_success: - - codecov + - pip install codecov + - codecov -e TOXENV diff --git a/setup.cfg b/setup.cfg index 10bd79e4..0e89958c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -7,7 +7,11 @@ universal = 1 [flake8] ignore = F405 max-line-length = 100 -exclude = docs/conf.py,build,migrations +exclude = + docs/conf.py, + build, + migrations, + .tox, [isort] indent = 4 @@ -18,3 +22,9 @@ known_standard_library = mock line_length = 100 multi_line_output = 3 skip_glob=*migrations* + +[coverage:report] +omit= + .tox/* + .eggs/* +show_missing = True diff --git a/setup.py b/setup.py index c99639a0..7ff381e2 100755 --- a/setup.py +++ b/setup.py @@ -108,6 +108,7 @@ def get_package_data(package): 'factory-boy', 'pytest-django', 'pytest', + 'pytest-cov', 'django-polymorphic>=2.0', 'packaging', 'django-debug-toolbar' diff --git a/tox.ini b/tox.ini index dc7470e0..c5c341fb 100644 --- a/tox.ini +++ b/tox.ini @@ -1,10 +1,12 @@ [tox] envlist = py{27,34,35,36}-django111-drf{36,37}, + py{34,35,36}-django20-drf{37}, [testenv] deps = django111: Django>=1.11,<1.12 + django20: Django>=2.0,<2.1 drf36: djangorestframework>=3.6.3,<3.7 drf37: djangorestframework>=3.7.0,<3.8 @@ -13,7 +15,12 @@ setenv = DJANGO_SETTINGS_MODULE=example.settings.test commands = - python setup.py test {posargs} + python setup.py test --addopts '--cov --no-cov-on-fail' {posargs} + +[testenv:flake8] +deps = flake8 +commands = flake8 +skip_install = true [testenv:isort] deps = @@ -22,4 +29,4 @@ commands = isort --check-only --verbose --recursive --diff rest_framework_json_api # example has extra dependencies that are installed in a dev environment # but are not installed in CI. Explicitly set those packages. - isort --check-only --verbose --recursive --diff --thirdparty pytest --thirdparty polymorphic --thirdparty pytest_factoryboy example + isort --check-only --verbose --recursive --diff --thirdparty pytest --thirdparty polymorphic --thirdparty pytest_factoryboy --thirdparty packaging example
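The underlying bug this record guards against was a runtime dependency (`six`) that was only present by accident in developers' environments. Running even a trivial import test inside an isolated tox virtualenv surfaces that immediately, because only the declared `install_requires` are available there. A minimal sketch of such a smoke test follows; the module list is illustrative.

```python
# test_smoke.py -- run inside a clean tox environment so that only the
# package's declared install_requires are importable.
import importlib

import pytest


@pytest.mark.parametrize('module', ['rest_framework_json_api'])
def test_package_imports(module):
    # A dependency missing from install_requires (as six was here) fails
    # this import in CI instead of failing in a downstream project.
    importlib.import_module(module)
```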
deis__deis-280
Update chef_version in provisioning scripts I see in the digitalocean support that @bacongobbler removed the --bootstrap-version=11.4.4 and things still seem to work with more current Chef (11.6.2). This wasn't the case before--the apt cookbook failed--so we had pinned it at a working version. Let's retest that we're compatible with Chef 11.6.x and then remove --bootstrap-version from the provisioning scripts if so.
[ { "content": "\"\"\"\nDeis configuration management implementation for Opscode Chef.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\nimport subprocess\nimport tempfile\nimport time\nimport socket\n\nfrom celery.canvas import group\n\nfrom api.ssh import exec_ssh, connect_ssh\nfrom cm.chef_api import ChefAPI\n\n\nCHEF_CONFIG_PATH = '/etc/chef'\nCHEF_INSTALL_TYPE = 'gems'\nCHEF_RUBY_VERSION = '1.9.1'\nCHEF_ENVIRONMENT = '_default'\nCHEF_CLIENT_VERSION = '11.4.4'\n\n# load chef config using CHEF_CONFIG_PATH\ntry:\n # parse controller's chef config for server_url and client_name\n _client_cfg_path = os.path.join(CHEF_CONFIG_PATH, 'client.rb')\n if not os.path.exists(_client_cfg_path):\n raise EnvironmentError('Could not find {}'.format(_client_cfg_path))\n with open(_client_cfg_path) as f:\n _data = f.read()\n # construct a dict from the ruby client.rb\n _d = {}\n for m in re.findall(r'''^([a-zA-Z0-9_]+)[ \\t]+(.*)$''',\n _data, re.MULTILINE):\n _d[m[0]] = m[1].strip(\"'\").strip('\"')\n # set global variables from client.rb\n CHEF_SERVER_URL = _d['chef_server_url']\n CHEF_NODE_NAME = _d.get('node_name', socket.gethostname())\n CHEF_CLIENT_NAME = _d.get('node_name', socket.gethostname())\n CHEF_VALIDATION_NAME = _d['validation_client_name']\n # read the client key\n _client_pem_path = os.path.join(CHEF_CONFIG_PATH, 'client.pem')\n CHEF_CLIENT_KEY = subprocess.check_output(\n ['sudo', '/bin/cat', _client_pem_path]).strip('\\n')\n # read the validation key\n _valid_pem_path = os.path.join(CHEF_CONFIG_PATH, 'validation.pem')\n CHEF_VALIDATION_KEY = subprocess.check_output(\n ['sudo', '/bin/cat', _valid_pem_path]).strip('\\n')\nexcept Exception as err:\n msg = \"Failed to auto-configure Chef -- {}\".format(err)\n if os.environ.get('READTHEDOCS'):\n # Just print the error if Sphinx is running\n print(msg)\n else:\n raise EnvironmentError(msg)\n\n\ndef _get_client():\n \"\"\"\n Return a new instance of a Chef API Client\n\n :rtype: a :class:`~cm.chef_api.ChefAPI` object\n \"\"\"\n return ChefAPI(CHEF_SERVER_URL, CHEF_CLIENT_NAME, CHEF_CLIENT_KEY)\n\n\ndef bootstrap_node(node):\n \"\"\"\n Bootstrap the Chef configuration management tools onto a node.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :raises: RuntimeError\n \"\"\"\n # block until we can connect over ssh\n ssh = connect_ssh(node['ssh_username'], node['fqdn'], node.get('ssh_port', 22),\n node['ssh_private_key'], timeout=120)\n # block until ubuntu cloud-init is finished\n initializing = True\n while initializing:\n time.sleep(10)\n initializing, _rc = exec_ssh(ssh, 'ps auxw | egrep \"cloud-init\" | grep -v egrep')\n # write out private key and prepare to `knife bootstrap`\n try:\n _, pk_path = tempfile.mkstemp()\n _, output_path = tempfile.mkstemp()\n with open(pk_path, 'w') as f:\n f.write(node['ssh_private_key'])\n # build knife bootstrap command\n args = ['knife', 'bootstrap', node['fqdn']]\n args.extend(['--identity-file', pk_path])\n args.extend(['--node-name', node['id']])\n args.extend(['--sudo', '--ssh-user', node['ssh_username']])\n args.extend(['--ssh-port', str(node.get('ssh_port', 22))])\n args.extend(['--bootstrap-version', CHEF_CLIENT_VERSION])\n args.extend(['--no-host-key-verify'])\n args.extend(['--run-list', _construct_run_list(node)])\n print(' '.join(args))\n # tee the command's output to a tempfile\n args.extend(['|', 'tee', output_path])\n # TODO: figure out why home isn't being set correctly for knife exec\n env = os.environ.copy()\n env['HOME'] 
= '/opt/deis'\n # execute knife bootstrap\n p = subprocess.Popen(' '.join(args), env=env, shell=True)\n rc = p.wait()\n # always print knife output\n with open(output_path) as f:\n output = f.read()\n print(output)\n # raise an exception if bootstrap failed\n if rc != 0:\n raise RuntimeError('Node Bootstrap Error')\n # remove temp files from filesystem\n finally:\n os.remove(pk_path)\n os.remove(output_path)\n\n\ndef _construct_run_list(node):\n config = node['config']\n # if run_list override specified, use it (assumes csv)\n run_list = config.get('run_list', [])\n # otherwise construct a run_list using proxy/runtime flags\n if not run_list:\n run_list = ['recipe[deis]']\n if node.get('runtime') is True:\n run_list.append('recipe[deis::runtime]')\n if node.get('proxy') is True:\n run_list.append('recipe[deis::proxy]')\n return ','.join(run_list)\n\n\ndef purge_node(node):\n \"\"\"\n Purge a node and its client from Chef configuration management.\n\n :param node: a dict containing the id of a node to purge\n \"\"\"\n client = _get_client()\n client.delete_node(node['id'])\n client.delete_client(node['id'])\n\n\ndef converge_controller():\n \"\"\"\n Converge this controller node.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :returns: the output of the convergence command, in this case `sudo chef-client`\n \"\"\"\n try:\n return subprocess.check_output(['sudo', 'chef-client'])\n except subprocess.CalledProcessError as err:\n print(err)\n print(err.output)\n raise err\n\n\ndef converge_node(node):\n \"\"\"\n Converge a node.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :returns: a tuple of the convergence command's (output, return_code)\n \"\"\"\n ssh = connect_ssh(node['ssh_username'],\n node['fqdn'], 22,\n node['ssh_private_key'])\n output, rc = exec_ssh(ssh, 'sudo chef-client')\n print(output)\n if rc != 0:\n e = RuntimeError('Node converge error')\n e.output = output\n raise e\n return output, rc\n\n\ndef run_node(node, command):\n \"\"\"\n Run a command on a node.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :param command: the command-line to execute on the node\n :returns: a tuple of the command's (output, return_code)\n \"\"\"\n ssh = connect_ssh(node['ssh_username'], node['fqdn'],\n node['ssh_port'], node['ssh_private_key'])\n output, rc = exec_ssh(ssh, command, pty=True)\n return output, rc\n\n\ndef converge_formation(formation):\n \"\"\"\n Converge all nodes in a formation.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :param formation: a :class:`~api.models.Formation` to converge\n :returns: the combined output of the nodes' convergence commands\n \"\"\"\n nodes = formation.node_set.all()\n subtasks = []\n for n in nodes:\n subtask = converge_node.s(n.id,\n n.layer.flavor.ssh_username,\n n.fqdn,\n n.layer.flavor.ssh_private_key)\n subtasks.append(subtask)\n job = group(*subtasks)\n return job.apply_async().join()\n\n\ndef publish_user(user, data):\n \"\"\"\n Publish a user to configuration management.\n\n :param user: a dict containing the username\n :param data: data to store with the user\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-users', user['username'], data)\n\n\ndef 
publish_app(app, data):\n \"\"\"\n Publish an app to configuration management.\n\n :param app: a dict containing the id of the app\n :param data: data to store with the app\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-apps', app['id'], data)\n\n\ndef purge_app(app):\n \"\"\"\n Purge an app from configuration management.\n\n :param app: a dict containing the id of the app\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _purge('deis-apps', app['id'])\n\n\ndef publish_formation(formation, data):\n \"\"\"\n Publish a formation to configuration management.\n\n :param formation: a dict containing the id of the formation\n :param data: data to store with the formation\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-formations', formation['id'], data)\n\n\ndef purge_formation(formation):\n \"\"\"\n Purge a formation from configuration management.\n\n :param formation: a dict containing the id of the formation\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _purge('deis-formations', formation['id'])\n\n\ndef _publish(data_bag, item_name, item_value):\n \"\"\"\n Publish a data bag item to the Chef server.\n\n :param data_bag: the name of a Chef data bag\n :param item_name: the name of the item to publish\n :param item_value: the value of the item to publish\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n client = _get_client()\n body, status = client.update_databag_item(data_bag, item_name, item_value)\n if status != 200:\n body, status = client.create_databag_item(data_bag, item_name, item_value)\n if status != 201:\n raise RuntimeError('Could not publish {item_name}: {body}'.format(**locals()))\n return body, status\n\n\ndef _purge(databag_name, item_name):\n \"\"\"\n Purge a data bag item from the Chef server.\n\n :param databag_name: the name of a Chef data bag\n :param item_name: the name of the item to purge\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n client = _get_client()\n body, status = client.delete_databag_item(databag_name, item_name)\n if status == 200 or status == 404:\n return body, status\n raise RuntimeError('Could not purge {item_name}: {body}'.format(**locals()))\n", "path": "cm/chef.py" } ]
[ { "content": "\"\"\"\nDeis configuration management implementation for Opscode Chef.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\nimport subprocess\nimport tempfile\nimport time\nimport socket\n\nfrom celery.canvas import group\n\nfrom api.ssh import exec_ssh, connect_ssh\nfrom cm.chef_api import ChefAPI\n\n\nCHEF_CONFIG_PATH = '/etc/chef'\nCHEF_INSTALL_TYPE = 'gems'\nCHEF_RUBY_VERSION = '1.9.1'\nCHEF_ENVIRONMENT = '_default'\nCHEF_CLIENT_VERSION = '11.6.2'\n\n# load chef config using CHEF_CONFIG_PATH\ntry:\n # parse controller's chef config for server_url and client_name\n _client_cfg_path = os.path.join(CHEF_CONFIG_PATH, 'client.rb')\n if not os.path.exists(_client_cfg_path):\n raise EnvironmentError('Could not find {}'.format(_client_cfg_path))\n with open(_client_cfg_path) as f:\n _data = f.read()\n # construct a dict from the ruby client.rb\n _d = {}\n for m in re.findall(r'''^([a-zA-Z0-9_]+)[ \\t]+(.*)$''',\n _data, re.MULTILINE):\n _d[m[0]] = m[1].strip(\"'\").strip('\"')\n # set global variables from client.rb\n CHEF_SERVER_URL = _d['chef_server_url']\n CHEF_NODE_NAME = _d.get('node_name', socket.gethostname())\n CHEF_CLIENT_NAME = _d.get('node_name', socket.gethostname())\n CHEF_VALIDATION_NAME = _d['validation_client_name']\n # read the client key\n _client_pem_path = os.path.join(CHEF_CONFIG_PATH, 'client.pem')\n CHEF_CLIENT_KEY = subprocess.check_output(\n ['sudo', '/bin/cat', _client_pem_path]).strip('\\n')\n # read the validation key\n _valid_pem_path = os.path.join(CHEF_CONFIG_PATH, 'validation.pem')\n CHEF_VALIDATION_KEY = subprocess.check_output(\n ['sudo', '/bin/cat', _valid_pem_path]).strip('\\n')\nexcept Exception as err:\n msg = \"Failed to auto-configure Chef -- {}\".format(err)\n if os.environ.get('READTHEDOCS'):\n # Just print the error if Sphinx is running\n print(msg)\n else:\n raise EnvironmentError(msg)\n\n\ndef _get_client():\n \"\"\"\n Return a new instance of a Chef API Client\n\n :rtype: a :class:`~cm.chef_api.ChefAPI` object\n \"\"\"\n return ChefAPI(CHEF_SERVER_URL, CHEF_CLIENT_NAME, CHEF_CLIENT_KEY)\n\n\ndef bootstrap_node(node):\n \"\"\"\n Bootstrap the Chef configuration management tools onto a node.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :raises: RuntimeError\n \"\"\"\n # block until we can connect over ssh\n ssh = connect_ssh(node['ssh_username'], node['fqdn'], node.get('ssh_port', 22),\n node['ssh_private_key'], timeout=120)\n # block until ubuntu cloud-init is finished\n initializing = True\n while initializing:\n time.sleep(10)\n initializing, _rc = exec_ssh(ssh, 'ps auxw | egrep \"cloud-init\" | grep -v egrep')\n # write out private key and prepare to `knife bootstrap`\n try:\n _, pk_path = tempfile.mkstemp()\n _, output_path = tempfile.mkstemp()\n with open(pk_path, 'w') as f:\n f.write(node['ssh_private_key'])\n # build knife bootstrap command\n args = ['knife', 'bootstrap', node['fqdn']]\n args.extend(['--identity-file', pk_path])\n args.extend(['--node-name', node['id']])\n args.extend(['--sudo', '--ssh-user', node['ssh_username']])\n args.extend(['--ssh-port', str(node.get('ssh_port', 22))])\n args.extend(['--bootstrap-version', CHEF_CLIENT_VERSION])\n args.extend(['--no-host-key-verify'])\n args.extend(['--run-list', _construct_run_list(node)])\n print(' '.join(args))\n # tee the command's output to a tempfile\n args.extend(['|', 'tee', output_path])\n # TODO: figure out why home isn't being set correctly for knife exec\n env = os.environ.copy()\n env['HOME'] 
= '/opt/deis'\n # execute knife bootstrap\n p = subprocess.Popen(' '.join(args), env=env, shell=True)\n rc = p.wait()\n # always print knife output\n with open(output_path) as f:\n output = f.read()\n print(output)\n # raise an exception if bootstrap failed\n if rc != 0:\n raise RuntimeError('Node Bootstrap Error')\n # remove temp files from filesystem\n finally:\n os.remove(pk_path)\n os.remove(output_path)\n\n\ndef _construct_run_list(node):\n config = node['config']\n # if run_list override specified, use it (assumes csv)\n run_list = config.get('run_list', [])\n # otherwise construct a run_list using proxy/runtime flags\n if not run_list:\n run_list = ['recipe[deis]']\n if node.get('runtime') is True:\n run_list.append('recipe[deis::runtime]')\n if node.get('proxy') is True:\n run_list.append('recipe[deis::proxy]')\n return ','.join(run_list)\n\n\ndef purge_node(node):\n \"\"\"\n Purge a node and its client from Chef configuration management.\n\n :param node: a dict containing the id of a node to purge\n \"\"\"\n client = _get_client()\n client.delete_node(node['id'])\n client.delete_client(node['id'])\n\n\ndef converge_controller():\n \"\"\"\n Converge this controller node.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :returns: the output of the convergence command, in this case `sudo chef-client`\n \"\"\"\n try:\n return subprocess.check_output(['sudo', 'chef-client'])\n except subprocess.CalledProcessError as err:\n print(err)\n print(err.output)\n raise err\n\n\ndef converge_node(node):\n \"\"\"\n Converge a node.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :returns: a tuple of the convergence command's (output, return_code)\n \"\"\"\n ssh = connect_ssh(node['ssh_username'],\n node['fqdn'], 22,\n node['ssh_private_key'])\n output, rc = exec_ssh(ssh, 'sudo chef-client')\n print(output)\n if rc != 0:\n e = RuntimeError('Node converge error')\n e.output = output\n raise e\n return output, rc\n\n\ndef run_node(node, command):\n \"\"\"\n Run a command on a node.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :param command: the command-line to execute on the node\n :returns: a tuple of the command's (output, return_code)\n \"\"\"\n ssh = connect_ssh(node['ssh_username'], node['fqdn'],\n node['ssh_port'], node['ssh_private_key'])\n output, rc = exec_ssh(ssh, command, pty=True)\n return output, rc\n\n\ndef converge_formation(formation):\n \"\"\"\n Converge all nodes in a formation.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :param formation: a :class:`~api.models.Formation` to converge\n :returns: the combined output of the nodes' convergence commands\n \"\"\"\n nodes = formation.node_set.all()\n subtasks = []\n for n in nodes:\n subtask = converge_node.s(n.id,\n n.layer.flavor.ssh_username,\n n.fqdn,\n n.layer.flavor.ssh_private_key)\n subtasks.append(subtask)\n job = group(*subtasks)\n return job.apply_async().join()\n\n\ndef publish_user(user, data):\n \"\"\"\n Publish a user to configuration management.\n\n :param user: a dict containing the username\n :param data: data to store with the user\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-users', user['username'], data)\n\n\ndef 
publish_app(app, data):\n \"\"\"\n Publish an app to configuration management.\n\n :param app: a dict containing the id of the app\n :param data: data to store with the app\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-apps', app['id'], data)\n\n\ndef purge_app(app):\n \"\"\"\n Purge an app from configuration management.\n\n :param app: a dict containing the id of the app\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _purge('deis-apps', app['id'])\n\n\ndef publish_formation(formation, data):\n \"\"\"\n Publish a formation to configuration management.\n\n :param formation: a dict containing the id of the formation\n :param data: data to store with the formation\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-formations', formation['id'], data)\n\n\ndef purge_formation(formation):\n \"\"\"\n Purge a formation from configuration management.\n\n :param formation: a dict containing the id of the formation\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _purge('deis-formations', formation['id'])\n\n\ndef _publish(data_bag, item_name, item_value):\n \"\"\"\n Publish a data bag item to the Chef server.\n\n :param data_bag: the name of a Chef data bag\n :param item_name: the name of the item to publish\n :param item_value: the value of the item to publish\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n client = _get_client()\n body, status = client.update_databag_item(data_bag, item_name, item_value)\n if status != 200:\n body, status = client.create_databag_item(data_bag, item_name, item_value)\n if status != 201:\n raise RuntimeError('Could not publish {item_name}: {body}'.format(**locals()))\n return body, status\n\n\ndef _purge(databag_name, item_name):\n \"\"\"\n Purge a data bag item from the Chef server.\n\n :param databag_name: the name of a Chef data bag\n :param item_name: the name of the item to purge\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n client = _get_client()\n body, status = client.delete_databag_item(databag_name, item_name)\n if status == 200 or status == 404:\n return body, status\n raise RuntimeError('Could not purge {item_name}: {body}'.format(**locals()))\n", "path": "cm/chef.py" } ]
diff --git a/cm/chef.py b/cm/chef.py index d8b53792ad..9203995fa1 100644 --- a/cm/chef.py +++ b/cm/chef.py @@ -21,7 +21,7 @@ CHEF_INSTALL_TYPE = 'gems' CHEF_RUBY_VERSION = '1.9.1' CHEF_ENVIRONMENT = '_default' -CHEF_CLIENT_VERSION = '11.4.4' +CHEF_CLIENT_VERSION = '11.6.2' # load chef config using CHEF_CONFIG_PATH try: diff --git a/contrib/digitalocean/provision-digitalocean-controller.sh b/contrib/digitalocean/provision-digitalocean-controller.sh index b43b16a2cb..07d46b5ac8 100755 --- a/contrib/digitalocean/provision-digitalocean-controller.sh +++ b/contrib/digitalocean/provision-digitalocean-controller.sh @@ -39,7 +39,7 @@ fi node_name=deis-controller run_list="recipe[deis::controller]" -chef_version=11.4.4 +chef_version=11.6.2 ########################## # digital ocean settings # @@ -90,6 +90,7 @@ echo_color "Provisioning $node_name with knife digital_ocean..." set -x knife digital_ocean droplet create \ + --bootstrap-version $chef_version \ --server-name $node_name \ --image $image_id \ --location $location_id \ diff --git a/contrib/ec2/provision-ec2-controller.sh b/contrib/ec2/provision-ec2-controller.sh index 669d668830..54c9371e38 100755 --- a/contrib/ec2/provision-ec2-controller.sh +++ b/contrib/ec2/provision-ec2-controller.sh @@ -33,7 +33,7 @@ fi ################# node_name=deis-controller run_list="recipe[deis::controller]" -chef_version=11.4.4 +chef_version=11.6.2 ####################### # Amazon EC2 settings # diff --git a/contrib/rackspace/provision-rackspace-controller.sh b/contrib/rackspace/provision-rackspace-controller.sh index 935bb63bc1..dfcf0e061b 100755 --- a/contrib/rackspace/provision-rackspace-controller.sh +++ b/contrib/rackspace/provision-rackspace-controller.sh @@ -29,7 +29,7 @@ fi ################# node_name=deis-controller run_list="recipe[deis::controller]" -chef_version=11.4.4 +chef_version=11.6.2 ###################### # Rackspace settings # diff --git a/contrib/vagrant/provision-vagrant-controller.sh b/contrib/vagrant/provision-vagrant-controller.sh index 9dee94289a..714d70da8a 100755 --- a/contrib/vagrant/provision-vagrant-controller.sh +++ b/contrib/vagrant/provision-vagrant-controller.sh @@ -18,7 +18,7 @@ fi ################# node_name=deis-controller run_list="recipe[deis::controller]" -chef_version=11.4.4 +chef_version=11.6.2 ################ # SSH settings #
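The diff above only bumps the pinned Chef client version; the run-list construction in `cm/chef.py` is unchanged. As a usage illustration of that logic, here is a standalone sketch — a plain function mirroring `_construct_run_list` shown above, fed with hypothetical sample node data rather than importing the real module:

```python
# Standalone sketch mirroring _construct_run_list from cm/chef.py above.
# The node dicts below are hypothetical sample data, not real Deis nodes.
def construct_run_list(node):
    config = node["config"]
    # an explicit run_list override wins (assumed to be CSV, as in the original)
    run_list = config.get("run_list", [])
    if not run_list:
        run_list = ["recipe[deis]"]
        if node.get("runtime") is True:
            run_list.append("recipe[deis::runtime]")
        if node.get("proxy") is True:
            run_list.append("recipe[deis::proxy]")
    return ",".join(run_list)


print(construct_run_list({"config": {}, "runtime": True, "proxy": False}))
# -> recipe[deis],recipe[deis::runtime]
print(construct_run_list({"config": {}, "runtime": True, "proxy": True}))
# -> recipe[deis],recipe[deis::runtime],recipe[deis::proxy]
```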
scikit-hep__pyhf-457
Determine if papermill v1.0 API change is a problem # Description The [papermill `v1.0` release will introduce API breaking changes](https://github.com/nteract/papermill/blob/d554193bc458797b63af1f94964883d5dcca2418/README.md). It would be good to determine if these changes will matter for pyhf testing and require the addition of [scrapbook](https://nteract-scrapbook.readthedocs.io/en/latest/) or if the API change doesn't affect pyhf.
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=0.16',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.1.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.1.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py" } ]
diff --git a/docs/examples/notebooks/multiBinPois.ipynb b/docs/examples/notebooks/multiBinPois.ipynb index dbd8356eeb..2fae86c610 100644 --- a/docs/examples/notebooks/multiBinPois.ipynb +++ b/docs/examples/notebooks/multiBinPois.ipynb @@ -30,24 +30,7 @@ "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/jovyan/pyhf/lib/python3.6/site-packages/ansiwrap/core.py:11: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/jovyan/pyhf/lib/python3.6/site-packages/textwrap3.py' mode='r' encoding='utf-8'>\n", - " a_textwrap = imp.load_module('a_textwrap', *imp.find_module('textwrap3'))\n", - "/home/jovyan/pyhf/lib/python3.6/site-packages/nbconvert/exporters/exporter_locator.py:28: DeprecationWarning: `nbconvert.exporters.exporter_locator` is deprecated in favor of `nbconvert.exporters.base` since nbconvert 5.0.\n", - " DeprecationWarning)\n", - "/home/jovyan/pyhf/lib/python3.6/site-packages/nbconvert/preprocessors/regexremove.py:41: DeprecationWarning: Traits should be given as instances, not types (for example, `Int()`, not `Int`). Passing types is deprecated in traitlets 4.1.\n", - " patterns = List(Unicode, default_value=[r'\\Z']).tag(config=True)\n", - "/home/jovyan/pyhf/lib/python3.6/site-packages/traitlets/traitlets.py:2367: DeprecationWarning: Traits should be given as instances, not types (for example, `Int()`, not `Int`). Passing types is deprecated in traitlets 4.1.\n", - " super(Set, self).__init__(trait, default_value, minlen, maxlen, **kwargs)\n", - "/home/jovyan/pyhf/lib/python3.6/site-packages/tornado/web.py:1747: DeprecationWarning: @asynchronous is deprecated, use coroutines instead\n", - " DeprecationWarning)\n" - ] - } - ], + "outputs": [], "source": [ "import logging\n", "import json\n", @@ -57,7 +40,7 @@ "from pyhf.simplemodels import hepdata_like\n", "\n", "from scipy.interpolate import griddata\n", - "import papermill as pm" + "import scrapbook as sb" ] }, { @@ -364,7 +347,7 @@ } ], "source": [ - "pm.record(\"number_2d_successpoints\", len(X))" + "sb.glue(\"number_2d_successpoints\", len(X))" ] } ], diff --git a/setup.py b/setup.py index 3971b8e076..d30789cfd7 100644 --- a/setup.py +++ b/setup.py @@ -37,7 +37,8 @@ 'jupyter', 'nbdime', 'uproot~=3.3', - 'papermill~=0.16', + 'papermill~=1.0', + 'nteract-scrapbook~=0.2', 'graphviz', 'bumpversion', 'sphinx', diff --git a/tests/test_notebooks.py b/tests/test_notebooks.py index 58f6488cf5..e7db67dd75 100644 --- a/tests/test_notebooks.py +++ b/tests/test_notebooks.py @@ -1,6 +1,7 @@ import sys import os import papermill as pm +import scrapbook as sb def test_notebooks(tmpdir): @@ -39,5 +40,5 @@ def test_notebooks(tmpdir): **common_kwargs ) - nb = pm.read_notebook(str(outputnb)) - assert nb.data['number_2d_successpoints'] > 200 + nb = sb.read_notebook(str(outputnb)) + assert nb.scraps['number_2d_successpoints'].data > 200
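A minimal sketch of the migration captured by this diff, assuming `papermill` and `nteract-scrapbook` are installed; the output path and glued value below are illustrative, while the scrap name and API calls come from the diff itself:

```python
# --- inside the executed notebook: papermill's removed pm.record() becomes scrapbook's glue() ---
import scrapbook as sb

sb.glue("number_2d_successpoints", 250)  # illustrative value

# --- in the test script: execute with papermill, read scraps back with scrapbook ---
import papermill as pm

pm.execute_notebook("multiBinPois.ipynb", "output.ipynb")  # illustrative paths
nb = sb.read_notebook("output.ipynb")
assert nb.scraps["number_2d_successpoints"].data > 200
```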
OpenNMT__OpenNMT-py-2204
torch.div() (1.6.0) does not have 'rounding_mode' parameter According to the torch 1.6.0 here: https://pytorch.org/docs/1.6.0/generated/torch.div.html?highlight=torch%20div#torch.div there is no 'rounding_mode' parameter. But in translator: https://github.com/OpenNMT/OpenNMT-py/blob/0f411ce11a83b18c0223ac94ccc11a35403763df/onmt/translate/beam_search.py#L282 That's why I receive this error: ``` onmt_translate -model ./../output/test/nmt/f0/run/model_step_100.pt -src ./../output/test/nmt/f0/src-test.txt -output ./../output/test/nmt/f0/test.epoch100.pred.csv -gpu 0 --min_length 2 -verbose [2022-09-15 20:32:19,980 INFO] Translating shard 0. Traceback (most recent call last): File "c:\programdata\anaconda3\envs\nuecg\lib\runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "c:\programdata\anaconda3\envs\nuecg\lib\runpy.py", line 85, in _run_code exec(code, run_globals) File "C:\ProgramData\Anaconda3\envs\nuecg\Scripts\onmt_translate.exe\__main__.py", line 7, in <module> File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\bin\translate.py", line 54, in main translate(opt) File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\bin\translate.py", line 38, in translate align_debug=opt.align_debug File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 440, in translate phrase_table=phrase_table) File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 487, in _translate batch, data.src_vocabs, attn_debug File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 861, in translate_batch batch, src_vocabs, decode_strategy File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 947, in _translate_batch_with_strategy decode_strategy.advance(log_probs, attn) File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\beam_search.py", line 283, in advance rounding_mode='trunc') TypeError: div() got an unexpected keyword argument 'rounding_mode' ```
[ { "content": "#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='OpenNMT-py',\n description='A python implementation of OpenNMT',\n long_description=long_description,\n long_description_content_type='text/markdown',\n version='2.3.0',\n packages=find_packages(),\n project_urls={\n \"Documentation\": \"http://opennmt.net/OpenNMT-py/\",\n \"Forum\": \"http://forum.opennmt.net/\",\n \"Gitter\": \"https://gitter.im/OpenNMT/OpenNMT-py\",\n \"Source\": \"https://github.com/OpenNMT/OpenNMT-py/\"\n },\n python_requires=\">=3.5\",\n install_requires=[\n \"torch>=1.6.0\",\n \"torchtext==0.5.0\",\n \"configargparse\",\n \"tensorboard>=2.3\",\n \"flask\",\n \"waitress\",\n \"pyonmttok>=1.23,<2\",\n \"pyyaml\",\n \"sacrebleu\"\n ],\n entry_points={\n \"console_scripts\": [\n \"onmt_server=onmt.bin.server:main\",\n \"onmt_train=onmt.bin.train:main\",\n \"onmt_translate=onmt.bin.translate:main\",\n \"onmt_translate_dynamic=onmt.bin.translate_dynamic:main\",\n \"onmt_release_model=onmt.bin.release_model:main\",\n \"onmt_average_models=onmt.bin.average_models:main\",\n \"onmt_build_vocab=onmt.bin.build_vocab:main\"\n ],\n }\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='OpenNMT-py',\n description='A python implementation of OpenNMT',\n long_description=long_description,\n long_description_content_type='text/markdown',\n version='2.3.0',\n packages=find_packages(),\n project_urls={\n \"Documentation\": \"http://opennmt.net/OpenNMT-py/\",\n \"Forum\": \"http://forum.opennmt.net/\",\n \"Gitter\": \"https://gitter.im/OpenNMT/OpenNMT-py\",\n \"Source\": \"https://github.com/OpenNMT/OpenNMT-py/\"\n },\n python_requires=\">=3.5\",\n install_requires=[\n \"torch>=1.9.0\",\n \"torchtext==0.5.0\",\n \"configargparse\",\n \"tensorboard>=2.3\",\n \"flask\",\n \"waitress\",\n \"pyonmttok>=1.23,<2\",\n \"pyyaml\",\n \"sacrebleu\"\n ],\n entry_points={\n \"console_scripts\": [\n \"onmt_server=onmt.bin.server:main\",\n \"onmt_train=onmt.bin.train:main\",\n \"onmt_translate=onmt.bin.translate:main\",\n \"onmt_translate_dynamic=onmt.bin.translate_dynamic:main\",\n \"onmt_release_model=onmt.bin.release_model:main\",\n \"onmt_average_models=onmt.bin.average_models:main\",\n \"onmt_build_vocab=onmt.bin.build_vocab:main\"\n ],\n }\n)\n", "path": "setup.py" } ]
diff --git a/README.md b/README.md index 3ed886afeb..051cd39cae 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ Table of Contents OpenNMT-py requires: - Python >= 3.6 -- PyTorch == 1.6.0 +- PyTorch >= 1.9.0 Install `OpenNMT-py` from `pip`: ```bash @@ -251,7 +251,7 @@ The original code was written by [Adam Lerer](http://github.com/adamlerer) (NYC) Major contributors are: * [Sasha Rush](https://github.com/srush) (Cambridge, MA) -* [Vincent Nguyen](https://github.com/vince62s) (Ubiqus) +* [Vincent Nguyen](https://github.com/vince62s) (ex-Ubiqus) * [Ben Peters](http://github.com/bpopeters) (Lisbon) * [Sebastian Gehrmann](https://github.com/sebastianGehrmann) (Harvard NLP) * [Yuntian Deng](https://github.com/da03) (Harvard NLP) diff --git a/setup.py b/setup.py index 988123d49c..d2c1fb701c 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ }, python_requires=">=3.5", install_requires=[ - "torch>=1.6.0", + "torch>=1.9.0", "torchtext==0.5.0", "configargparse", "tensorboard>=2.3",
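The fix above simply raises the minimum torch requirement to 1.9.0, where `rounding_mode` is supported. If one instead wanted the beam-search index arithmetic to keep working on older torch builds, a compatibility shim along these lines could be used — this is a hedged sketch, not part of the OpenNMT-py patch, and the helper name is invented:

```python
import torch


def trunc_div(numerator, denominator):
    """Truncating integer division that works on both old and new torch."""
    try:
        # Recent torch releases (including the 1.9.0 now required) accept rounding_mode.
        return torch.div(numerator, denominator, rounding_mode="trunc")
    except TypeError:
        # Older torch (e.g. 1.6.0) raises the TypeError from the report; emulate
        # truncation toward zero with true division + trunc.
        return (numerator / denominator).trunc().to(numerator.dtype)


topk_ids = torch.tensor([7, 12, 25])
print(trunc_div(topk_ids, 5))  # tensor([1, 2, 5])
```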
pytorch__ignite-844
Typehint of ignite._utils._to_hours_mins_secs not satisfied with float ## 🐛 Bug description This is a very minor bug. The type hint of the following function in `ignite._utils` is not satisfied when it receives a `float` argument: ``` python def _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]: """Convert seconds to hours, mins, and seconds.""" mins, secs = divmod(time_taken, 60) hours, mins = divmod(mins, 60) return hours, mins, secs ``` We have ```python >>> divmod(10.0,2) (5.0, 0.0) ``` ## Environment - PyTorch Version (e.g., 1.4): 1.4 - Ignite Version (e.g., 0.3.0): 0.3.0 - OS (e.g., Linux): Linux - How you installed Ignite (`conda`, `pip`, source): conda - Python version: 3.7 - Any other relevant information:
[ { "content": "from typing import Union, Tuple\n\n# For compatibilty\nfrom ignite.utils import convert_tensor, apply_to_tensor, apply_to_type, to_onehot\n\n\ndef _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]:\n \"\"\"Convert seconds to hours, mins, and seconds.\"\"\"\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n return hours, mins, secs\n", "path": "ignite/_utils.py" } ]
[ { "content": "from typing import Union, Tuple\n\n# For compatibilty\nfrom ignite.utils import convert_tensor, apply_to_tensor, apply_to_type, to_onehot\n\n\ndef _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]:\n \"\"\"Convert seconds to hours, mins, and seconds.\"\"\"\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n return round(hours), round(mins), round(secs)\n", "path": "ignite/_utils.py" } ]
diff --git a/ignite/_utils.py b/ignite/_utils.py index 0021435c96c9..fe5d830151b5 100644 --- a/ignite/_utils.py +++ b/ignite/_utils.py @@ -8,4 +8,4 @@ def _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]: """Convert seconds to hours, mins, and seconds.""" mins, secs = divmod(time_taken, 60) hours, mins = divmod(mins, 60) - return hours, mins, secs + return round(hours), round(mins), round(secs)
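A small sketch reproducing the behaviour described in the report and the effect of the patch: `divmod` propagates float inputs, so wrapping each component in `round()` (as the patched function above now does) is what restores the advertised `Tuple[int, int, int]`:

```python
def _to_hours_mins_secs(time_taken):
    """Patched version from ignite/_utils.py: seconds -> (hours, mins, secs) as ints."""
    mins, secs = divmod(time_taken, 60)
    hours, mins = divmod(mins, 60)
    return round(hours), round(mins), round(secs)


print(divmod(10.0, 2))              # (5.0, 0.0) -- floats leak through divmod
print(_to_hours_mins_secs(3725.0))  # (1, 2, 5)  -- ints again after the fix
```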
tobymao__sqlglot-2170
Specifying 3.7+ in setup.py sqlglot fails with Python versions < 3.7. While Python 3.6 is end-of-life, there are systems that still use 3.6 as they transition to 3.7. Having either `setup.py` or `setup.cfg` specify 3.7+, or adding a note to the README, would be helpful. ``` import sqlglot ``` `SyntaxError: future feature annotations is not defined` **Official Documentation** https://docs.python.org/3/library/__future__.html
[ { "content": "from setuptools import find_packages, setup\n\nsetup(\n name=\"sqlglot\",\n description=\"An easily customizable SQL parser and transpiler\",\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/tobymao/sqlglot\",\n author=\"Toby Mao\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=find_packages(include=[\"sqlglot\", \"sqlglot.*\"]),\n package_data={\"sqlglot\": [\"py.typed\"]},\n use_scm_version={\n \"write_to\": \"sqlglot/_version.py\",\n \"fallback_version\": \"0.0.0\",\n \"local_scheme\": \"no-local-version\",\n },\n setup_requires=[\"setuptools_scm\"],\n extras_require={\n \"dev\": [\n \"autoflake\",\n \"black\",\n \"duckdb>=0.6\",\n \"isort\",\n \"mypy>=0.990\",\n \"pandas\",\n \"pyspark\",\n \"python-dateutil\",\n \"pdoc\",\n \"pre-commit\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: SQL\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import find_packages, setup\n\nsetup(\n name=\"sqlglot\",\n description=\"An easily customizable SQL parser and transpiler\",\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/tobymao/sqlglot\",\n author=\"Toby Mao\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=find_packages(include=[\"sqlglot\", \"sqlglot.*\"]),\n package_data={\"sqlglot\": [\"py.typed\"]},\n use_scm_version={\n \"write_to\": \"sqlglot/_version.py\",\n \"fallback_version\": \"0.0.0\",\n \"local_scheme\": \"no-local-version\",\n },\n setup_requires=[\"setuptools_scm\"],\n python_requires=\">=3.7\",\n extras_require={\n \"dev\": [\n \"autoflake\",\n \"black\",\n \"duckdb>=0.6\",\n \"isort\",\n \"mypy>=0.990\",\n \"pandas\",\n \"pyspark\",\n \"python-dateutil\",\n \"pdoc\",\n \"pre-commit\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: SQL\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index cffaeade7e..8c67a07620 100644 --- a/setup.py +++ b/setup.py @@ -17,6 +17,7 @@ "local_scheme": "no-local-version", }, setup_requires=["setuptools_scm"], + python_requires=">=3.7", extras_require={ "dev": [ "autoflake",
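For reference, the mechanism this one-line patch relies on, sketched for a hypothetical package (only the `python_requires` line reflects the actual change): `from __future__ import annotations` is a Python 3.7+ feature, so declaring `python_requires=">=3.7"` lets pip on Python 3.6 skip the release instead of installing it and failing at import time:

```python
# Minimal setup.py sketch for a hypothetical package using the same guard as the patch.
from setuptools import find_packages, setup

setup(
    name="example-package",      # hypothetical name
    version="0.1.0",             # hypothetical version
    packages=find_packages(),
    python_requires=">=3.7",     # pip on Python 3.6 will not select this release
)
```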
GeotrekCE__Geotrek-admin-4004
Signage: deleted blades still appear in the detail view of a signage **Behaviour to reproduce:** - Create a signage - Create a blade associated with this signage - Delete the blade that was just created - Go to the detail view of the signage **Observed behaviour:** - The blade I just deleted is still present in the list of blades **Expected behaviour** - The blade should no longer appear in a signage's list of blades once it has been deleted
[ { "content": "import os\n\nfrom django.db import models\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import receiver\nfrom django.utils.translation import gettext_lazy as _, pgettext_lazy\n\nfrom django.conf import settings\n\nfrom geotrek.authent.models import StructureOrNoneRelated\nfrom geotrek.common.mixins.models import AddPropertyMixin, NoDeleteMixin, OptionalPictogramMixin, GeotrekMapEntityMixin, TimeStampedModelMixin\nfrom geotrek.common.models import Organism\nfrom geotrek.common.signals import log_cascade_deletion\nfrom geotrek.common.utils import (\n classproperty, format_coordinates, collate_c, spatial_reference, intersecting, queryset_or_model, queryset_or_all_objects\n)\n\nfrom geotrek.core.models import Topology, Path\n\nfrom geotrek.infrastructure.models import BaseInfrastructure\nfrom geotrek.signage.managers import SignageGISManager\n\nfrom geotrek.zoning.mixins import ZoningPropertiesMixin\n\n\nclass Sealing(TimeStampedModelMixin, StructureOrNoneRelated):\n \"\"\" A sealing linked with a signage\"\"\"\n label = models.CharField(verbose_name=_(\"Name\"), max_length=250)\n\n class Meta:\n verbose_name = _(\"Sealing\")\n verbose_name_plural = _(\"Sealings\")\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n\nclass SignageType(TimeStampedModelMixin, StructureOrNoneRelated, OptionalPictogramMixin):\n \"\"\" Types of infrastructures (bridge, WC, stairs, ...) \"\"\"\n label = models.CharField(max_length=128)\n\n class Meta:\n verbose_name = _(\"Signage Type\")\n verbose_name_plural = _(\"Signage Types\")\n ordering = ('label',)\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n def get_pictogram_url(self):\n pictogram_url = super().get_pictogram_url()\n if pictogram_url:\n return pictogram_url\n return os.path.join(settings.STATIC_URL, 'signage/picto-signage.png')\n\n\nclass LinePictogram(TimeStampedModelMixin, OptionalPictogramMixin):\n label = models.CharField(verbose_name=_(\"Label\"), max_length=250, blank=True, null=False, default='')\n code = models.CharField(verbose_name=_(\"Code\"), max_length=250, blank=True, null=False, default='')\n description = models.TextField(verbose_name=_(\"Description\"), blank=True, help_text=_(\"Complete description\"))\n\n class Meta:\n verbose_name = _(\"Line pictogram\")\n verbose_name_plural = _(\"Line pictograms\")\n\n def __str__(self):\n return self.label\n\n\nclass SignageCondition(TimeStampedModelMixin, StructureOrNoneRelated):\n label = models.CharField(verbose_name=_(\"Name\"), max_length=250)\n\n class Meta:\n verbose_name = _(\"Signage Condition\")\n verbose_name_plural = _(\"Signage Conditions\")\n ordering = [\"label\"]\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n\nclass Signage(GeotrekMapEntityMixin, BaseInfrastructure):\n \"\"\" An infrastructure in the park, which is of type SIGNAGE \"\"\"\n objects = SignageGISManager()\n code = models.CharField(verbose_name=_(\"Code\"), max_length=250, blank=True, null=False, default='')\n manager = models.ForeignKey(Organism, verbose_name=_(\"Manager\"), null=True, blank=True, on_delete=models.PROTECT)\n sealing = models.ForeignKey(Sealing, verbose_name=_(\"Sealing\"), null=True, blank=True, on_delete=models.PROTECT)\n printed_elevation = models.IntegerField(verbose_name=_(\"Printed elevation\"), blank=True, null=True)\n 
type = models.ForeignKey(SignageType, related_name='signages', verbose_name=_(\"Type\"), on_delete=models.PROTECT)\n coordinates_verbose_name = _(\"Coordinates\")\n conditions = models.ManyToManyField(\n SignageCondition,\n related_name='signages',\n verbose_name=_(\"Condition\"), blank=True)\n\n geometry_types_allowed = [\"POINT\"]\n\n class Meta:\n verbose_name = _(\"Signage\")\n verbose_name_plural = _(\"Signages\")\n\n @classmethod\n def path_signages(cls, path):\n if settings.TREKKING_TOPOLOGY_ENABLED:\n return cls.objects.existing().filter(aggregations__path=path).distinct('pk')\n else:\n area = path.geom.buffer(settings.TREK_SIGNAGE_INTERSECTION_MARGIN)\n return cls.objects.existing().filter(geom__intersects=area)\n\n @classmethod\n def topology_signages(cls, topology, queryset=None):\n if settings.TREKKING_TOPOLOGY_ENABLED:\n qs = cls.overlapping(topology, all_objects=queryset)\n else:\n area = topology.geom.buffer(settings.TREK_SIGNAGE_INTERSECTION_MARGIN)\n qs = queryset_or_all_objects(queryset, cls)\n qs = qs.filter(geom__intersects=area)\n return qs\n\n @classmethod\n def published_topology_signages(cls, topology):\n return cls.topology_signages(topology).filter(published=True)\n\n @classmethod\n def outdoor_signages(cls, outdoor_obj, queryset=None):\n return intersecting(qs=queryset_or_model(queryset, cls), obj=outdoor_obj)\n\n @classmethod\n def tourism_signages(cls, tourism_obj, queryset=None):\n return intersecting(qs=queryset_or_model(queryset, cls), obj=tourism_obj)\n\n @property\n def order_blades(self):\n return self.blade_set.all().order_by(collate_c('number'))\n\n @property\n def coordinates(self):\n return \"{} ({})\".format(format_coordinates(self.geom), spatial_reference())\n\n @property\n def geomtransform(self):\n geom = self.topo_object.geom\n return geom.transform(settings.API_SRID, clone=True)\n\n @property\n def lat_value(self):\n return self.geomtransform.x\n\n @property\n def lng_value(self):\n return self.geomtransform.y\n\n @property\n def conditions_display(self):\n return \", \".join([str(c) for c in self.conditions.select_related('structure').all()])\n\n def distance(self, to_cls):\n \"\"\"Distance to associate this signage to another class\"\"\"\n return settings.TREK_SIGNAGE_INTERSECTION_MARGIN\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n for trek in self.treks.all():\n trek.save()\n\n def delete(self, *args, **kwargs):\n for trek in self.treks.all():\n trek.save()\n Blade.objects.filter(signage=self).update(deleted=True)\n super().delete(*args, **kwargs)\n\n\n@receiver(pre_delete, sender=Topology)\ndef log_cascade_deletion_from_signage_topology(sender, instance, using, **kwargs):\n # Signages are deleted when Topologies (from BaseInfrastructure) are deleted\n log_cascade_deletion(sender, instance, Signage, 'topo_object')\n\n\nPath.add_property('signages', lambda self: Signage.path_signages(self), _(\"Signages\"))\nTopology.add_property('signages', Signage.topology_signages, _(\"Signages\"))\nTopology.add_property('published_signages', lambda self: Signage.published_topology_signages(self),\n _(\"Published Signages\"))\n\n\nclass Direction(TimeStampedModelMixin, models.Model):\n label = models.CharField(max_length=128)\n\n class Meta:\n verbose_name = _(\"Direction\")\n verbose_name_plural = _(\"Directions\")\n\n def __str__(self):\n return self.label\n\n\nclass Color(TimeStampedModelMixin, models.Model):\n label = models.CharField(max_length=128)\n\n class Meta:\n verbose_name = _(\"Blade color\")\n verbose_name_plural = 
_(\"Blade colors\")\n\n def __str__(self):\n return self.label\n\n\nclass BladeType(TimeStampedModelMixin, StructureOrNoneRelated):\n \"\"\" Types of blades\"\"\"\n label = models.CharField(max_length=128)\n\n class Meta:\n verbose_name = _(\"Blade type\")\n verbose_name_plural = _(\"Blade types\")\n ordering = ('label',)\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n\nclass BladeCondition(TimeStampedModelMixin, StructureOrNoneRelated):\n label = models.CharField(verbose_name=_(\"Name\"), max_length=250)\n\n class Meta:\n verbose_name = _(\"Blade Condition\")\n verbose_name_plural = _(\"Blade Conditions\")\n ordering = ('label',)\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n\nclass Blade(TimeStampedModelMixin, ZoningPropertiesMixin, AddPropertyMixin, GeotrekMapEntityMixin, NoDeleteMixin):\n signage = models.ForeignKey(Signage, verbose_name=_(\"Signage\"),\n on_delete=models.CASCADE)\n number = models.CharField(verbose_name=_(\"Number\"), max_length=250)\n direction = models.ForeignKey(Direction, verbose_name=_(\"Direction\"), on_delete=models.PROTECT, null=True,\n blank=True)\n type = models.ForeignKey(BladeType, verbose_name=_(\"Type\"), on_delete=models.PROTECT)\n color = models.ForeignKey(Color, on_delete=models.PROTECT, null=True, blank=True,\n verbose_name=_(\"Color\"))\n conditions = models.ManyToManyField(\n BladeCondition,\n related_name='blades',\n verbose_name=_(\"Condition\"), blank=True)\n topology = models.ForeignKey(Topology, related_name=\"blades_set\", verbose_name=_(\"Blades\"), on_delete=models.CASCADE)\n colorblade_verbose_name = _(\"Color\")\n printedelevation_verbose_name = _(\"Printed elevation\")\n direction_verbose_name = _(\"Direction\")\n city_verbose_name = _(\"City\")\n bladecode_verbose_name = _(\"Code\")\n coordinates_verbose_name = \"{} ({})\".format(_(\"Coordinates\"), spatial_reference())\n can_duplicate = False\n\n class Meta:\n verbose_name = _(\"Blade\")\n verbose_name_plural = _(\"Blades\")\n\n @property\n def zoning_property(self):\n return self.signage\n\n @classproperty\n def geomfield(cls):\n return Topology._meta.get_field('geom')\n\n def __str__(self):\n return settings.BLADE_CODE_FORMAT.format(signagecode=self.signage.code, bladenumber=self.number)\n\n def set_topology(self, topology):\n self.topology = topology\n if not self.is_signage:\n raise ValueError(\"Expecting a signage\")\n\n @property\n def conditions_display(self):\n return \", \".join([str(c) for c in self.conditions.select_related('structure').all()])\n\n @property\n def paths(self):\n return self.signage.paths.all()\n\n @property\n def is_signage(self):\n if self.topology:\n return self.topology.kind == Signage.KIND\n return False\n\n @property\n def geom(self):\n return self.signage.geom\n\n @geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def signage_display(self):\n return '<img src=\"%simages/signage-16.png\" title=\"Signage\">' % settings.STATIC_URL\n\n @property\n def order_lines(self):\n return self.lines.order_by('number')\n\n @property\n def number_display(self):\n s = '<a data-pk=\"%s\" href=\"%s\" title=\"%s\" >%s</a>' % (self.pk, self.get_detail_url(), self, self)\n return s\n\n @property\n def name_display(self):\n s = '<a data-pk=\"%s\" href=\"%s\" title=\"%s\">%s</a>' % (self.pk,\n self.get_detail_url(),\n self,\n self)\n return s\n\n @property\n def structure(self):\n return 
self.signage.structure\n\n def same_structure(self, user):\n \"\"\" Returns True if the user is in the same structure or has\n bypass_structure permission, False otherwise. \"\"\"\n return (user.profile.structure == self.structure\n or user.is_superuser\n or user.has_perm('authent.can_bypass_structure'))\n\n @property\n def bladecode_csv_display(self):\n return settings.BLADE_CODE_FORMAT.format(signagecode=self.signage.code,\n bladenumber=self.number)\n\n @property\n def coordinates_csv_display(self):\n return self.coordinates or \"\"\n\n @property\n def printedelevation_csv_display(self):\n return self.signage.printed_elevation or \"\"\n\n @property\n def city_csv_display(self):\n return self.signage.cities[0] if self.signage.cities else \"\"\n\n @property\n def coordinates(self):\n return format_coordinates(self.geom)\n\n def distance(self, to_cls):\n \"\"\"Distance to associate this blade to another class\"\"\"\n return settings.TREK_SIGNAGE_INTERSECTION_MARGIN\n\n\n@receiver(pre_delete, sender=Topology)\ndef log_cascade_deletion_from_blade_topology(sender, instance, using, **kwargs):\n # Blade are deleted when Topology are deleted\n log_cascade_deletion(sender, instance, Blade, 'topology')\n\n\n@receiver(pre_delete, sender=Signage)\ndef log_cascade_deletion_from_blade_signage(sender, instance, using, **kwargs):\n # Blade are deleted when Signage are deleted\n log_cascade_deletion(sender, instance, Blade, 'signage')\n\n\nclass Line(models.Model):\n blade = models.ForeignKey(Blade, related_name='lines', verbose_name=_(\"Blade\"),\n on_delete=models.CASCADE)\n number = models.IntegerField(verbose_name=_(\"Number\"))\n direction = models.ForeignKey(Direction, verbose_name=_(\"Direction\"), on_delete=models.PROTECT, null=True,\n blank=True)\n text = models.CharField(verbose_name=_(\"Text\"), max_length=1000, blank=True, default=\"\")\n distance = models.DecimalField(verbose_name=_(\"Distance\"), null=True, blank=True,\n decimal_places=1, max_digits=8, help_text='km')\n pictograms = models.ManyToManyField('LinePictogram', related_name=\"lines\",\n blank=True,\n verbose_name=_(\"Pictograms\"))\n time = models.DurationField(verbose_name=pgettext_lazy(\"duration\", \"Time\"), null=True, blank=True,\n help_text=_(\"Hours:Minutes:Seconds\"))\n distance_pretty_verbose_name = _(\"Distance\")\n time_pretty_verbose_name = _(\"Time\")\n linecode_verbose_name = _(\"Code\")\n\n def __str__(self):\n return self.linecode\n\n @property\n def linecode(self):\n return settings.LINE_CODE_FORMAT.format(signagecode=self.blade.signage.code,\n bladenumber=self.blade.number,\n linenumber=self.number)\n\n @property\n def distance_pretty(self):\n if not self.distance:\n return \"\"\n return settings.LINE_DISTANCE_FORMAT.format(self.distance)\n\n @property\n def time_pretty(self):\n if not self.time:\n return \"\"\n hours = self.time.seconds // 3600\n minutes = (self.time.seconds % 3600) // 60\n seconds = self.time.seconds % 60\n return settings.LINE_TIME_FORMAT.format(hours=hours, minutes=minutes, seconds=seconds)\n\n class Meta:\n unique_together = (('blade', 'number'), )\n verbose_name = _(\"Line\")\n verbose_name_plural = _(\"Lines\")\n\n\n@receiver(pre_delete, sender=Blade)\ndef log_cascade_deletion_from_line_blade(sender, instance, using, **kwargs):\n # Lines are deleted when Blade are deleted\n log_cascade_deletion(sender, instance, Line, 'blade')\n", "path": "geotrek/signage/models.py" } ]
[ { "content": "import os\n\nfrom django.db import models\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import receiver\nfrom django.utils.translation import gettext_lazy as _, pgettext_lazy\n\nfrom django.conf import settings\n\nfrom geotrek.authent.models import StructureOrNoneRelated\nfrom geotrek.common.mixins.models import AddPropertyMixin, NoDeleteMixin, OptionalPictogramMixin, GeotrekMapEntityMixin, TimeStampedModelMixin\nfrom geotrek.common.models import Organism\nfrom geotrek.common.signals import log_cascade_deletion\nfrom geotrek.common.utils import (\n classproperty, format_coordinates, collate_c, spatial_reference, intersecting, queryset_or_model, queryset_or_all_objects\n)\n\nfrom geotrek.core.models import Topology, Path\n\nfrom geotrek.infrastructure.models import BaseInfrastructure\nfrom geotrek.signage.managers import SignageGISManager\n\nfrom geotrek.zoning.mixins import ZoningPropertiesMixin\n\n\nclass Sealing(TimeStampedModelMixin, StructureOrNoneRelated):\n \"\"\" A sealing linked with a signage\"\"\"\n label = models.CharField(verbose_name=_(\"Name\"), max_length=250)\n\n class Meta:\n verbose_name = _(\"Sealing\")\n verbose_name_plural = _(\"Sealings\")\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n\nclass SignageType(TimeStampedModelMixin, StructureOrNoneRelated, OptionalPictogramMixin):\n \"\"\" Types of infrastructures (bridge, WC, stairs, ...) \"\"\"\n label = models.CharField(max_length=128)\n\n class Meta:\n verbose_name = _(\"Signage Type\")\n verbose_name_plural = _(\"Signage Types\")\n ordering = ('label',)\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n def get_pictogram_url(self):\n pictogram_url = super().get_pictogram_url()\n if pictogram_url:\n return pictogram_url\n return os.path.join(settings.STATIC_URL, 'signage/picto-signage.png')\n\n\nclass LinePictogram(TimeStampedModelMixin, OptionalPictogramMixin):\n label = models.CharField(verbose_name=_(\"Label\"), max_length=250, blank=True, null=False, default='')\n code = models.CharField(verbose_name=_(\"Code\"), max_length=250, blank=True, null=False, default='')\n description = models.TextField(verbose_name=_(\"Description\"), blank=True, help_text=_(\"Complete description\"))\n\n class Meta:\n verbose_name = _(\"Line pictogram\")\n verbose_name_plural = _(\"Line pictograms\")\n\n def __str__(self):\n return self.label\n\n\nclass SignageCondition(TimeStampedModelMixin, StructureOrNoneRelated):\n label = models.CharField(verbose_name=_(\"Name\"), max_length=250)\n\n class Meta:\n verbose_name = _(\"Signage Condition\")\n verbose_name_plural = _(\"Signage Conditions\")\n ordering = [\"label\"]\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n\nclass Signage(GeotrekMapEntityMixin, BaseInfrastructure):\n \"\"\" An infrastructure in the park, which is of type SIGNAGE \"\"\"\n objects = SignageGISManager()\n code = models.CharField(verbose_name=_(\"Code\"), max_length=250, blank=True, null=False, default='')\n manager = models.ForeignKey(Organism, verbose_name=_(\"Manager\"), null=True, blank=True, on_delete=models.PROTECT)\n sealing = models.ForeignKey(Sealing, verbose_name=_(\"Sealing\"), null=True, blank=True, on_delete=models.PROTECT)\n printed_elevation = models.IntegerField(verbose_name=_(\"Printed elevation\"), blank=True, null=True)\n 
type = models.ForeignKey(SignageType, related_name='signages', verbose_name=_(\"Type\"), on_delete=models.PROTECT)\n coordinates_verbose_name = _(\"Coordinates\")\n conditions = models.ManyToManyField(\n SignageCondition,\n related_name='signages',\n verbose_name=_(\"Condition\"), blank=True)\n\n geometry_types_allowed = [\"POINT\"]\n\n class Meta:\n verbose_name = _(\"Signage\")\n verbose_name_plural = _(\"Signages\")\n\n @classmethod\n def path_signages(cls, path):\n if settings.TREKKING_TOPOLOGY_ENABLED:\n return cls.objects.existing().filter(aggregations__path=path).distinct('pk')\n else:\n area = path.geom.buffer(settings.TREK_SIGNAGE_INTERSECTION_MARGIN)\n return cls.objects.existing().filter(geom__intersects=area)\n\n @classmethod\n def topology_signages(cls, topology, queryset=None):\n if settings.TREKKING_TOPOLOGY_ENABLED:\n qs = cls.overlapping(topology, all_objects=queryset)\n else:\n area = topology.geom.buffer(settings.TREK_SIGNAGE_INTERSECTION_MARGIN)\n qs = queryset_or_all_objects(queryset, cls)\n qs = qs.filter(geom__intersects=area)\n return qs\n\n @classmethod\n def published_topology_signages(cls, topology):\n return cls.topology_signages(topology).filter(published=True)\n\n @classmethod\n def outdoor_signages(cls, outdoor_obj, queryset=None):\n return intersecting(qs=queryset_or_model(queryset, cls), obj=outdoor_obj)\n\n @classmethod\n def tourism_signages(cls, tourism_obj, queryset=None):\n return intersecting(qs=queryset_or_model(queryset, cls), obj=tourism_obj)\n\n @property\n def order_blades(self):\n return self.blade_set.existing().order_by(collate_c('number'))\n\n @property\n def coordinates(self):\n return \"{} ({})\".format(format_coordinates(self.geom), spatial_reference())\n\n @property\n def geomtransform(self):\n geom = self.topo_object.geom\n return geom.transform(settings.API_SRID, clone=True)\n\n @property\n def lat_value(self):\n return self.geomtransform.x\n\n @property\n def lng_value(self):\n return self.geomtransform.y\n\n @property\n def conditions_display(self):\n return \", \".join([str(c) for c in self.conditions.select_related('structure').all()])\n\n def distance(self, to_cls):\n \"\"\"Distance to associate this signage to another class\"\"\"\n return settings.TREK_SIGNAGE_INTERSECTION_MARGIN\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n for trek in self.treks.all():\n trek.save()\n\n def delete(self, *args, **kwargs):\n for trek in self.treks.all():\n trek.save()\n Blade.objects.filter(signage=self).update(deleted=True)\n super().delete(*args, **kwargs)\n\n\n@receiver(pre_delete, sender=Topology)\ndef log_cascade_deletion_from_signage_topology(sender, instance, using, **kwargs):\n # Signages are deleted when Topologies (from BaseInfrastructure) are deleted\n log_cascade_deletion(sender, instance, Signage, 'topo_object')\n\n\nPath.add_property('signages', lambda self: Signage.path_signages(self), _(\"Signages\"))\nTopology.add_property('signages', Signage.topology_signages, _(\"Signages\"))\nTopology.add_property('published_signages', lambda self: Signage.published_topology_signages(self),\n _(\"Published Signages\"))\n\n\nclass Direction(TimeStampedModelMixin, models.Model):\n label = models.CharField(max_length=128)\n\n class Meta:\n verbose_name = _(\"Direction\")\n verbose_name_plural = _(\"Directions\")\n\n def __str__(self):\n return self.label\n\n\nclass Color(TimeStampedModelMixin, models.Model):\n label = models.CharField(max_length=128)\n\n class Meta:\n verbose_name = _(\"Blade color\")\n verbose_name_plural 
= _(\"Blade colors\")\n\n def __str__(self):\n return self.label\n\n\nclass BladeType(TimeStampedModelMixin, StructureOrNoneRelated):\n \"\"\" Types of blades\"\"\"\n label = models.CharField(max_length=128)\n\n class Meta:\n verbose_name = _(\"Blade type\")\n verbose_name_plural = _(\"Blade types\")\n ordering = ('label',)\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n\nclass BladeCondition(TimeStampedModelMixin, StructureOrNoneRelated):\n label = models.CharField(verbose_name=_(\"Name\"), max_length=250)\n\n class Meta:\n verbose_name = _(\"Blade Condition\")\n verbose_name_plural = _(\"Blade Conditions\")\n ordering = ('label',)\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.label, self.structure.name)\n return self.label\n\n\nclass Blade(TimeStampedModelMixin, ZoningPropertiesMixin, AddPropertyMixin, GeotrekMapEntityMixin, NoDeleteMixin):\n signage = models.ForeignKey(Signage, verbose_name=_(\"Signage\"),\n on_delete=models.CASCADE)\n number = models.CharField(verbose_name=_(\"Number\"), max_length=250)\n direction = models.ForeignKey(Direction, verbose_name=_(\"Direction\"), on_delete=models.PROTECT, null=True,\n blank=True)\n type = models.ForeignKey(BladeType, verbose_name=_(\"Type\"), on_delete=models.PROTECT)\n color = models.ForeignKey(Color, on_delete=models.PROTECT, null=True, blank=True,\n verbose_name=_(\"Color\"))\n conditions = models.ManyToManyField(\n BladeCondition,\n related_name='blades',\n verbose_name=_(\"Condition\"), blank=True)\n topology = models.ForeignKey(Topology, related_name=\"blades_set\", verbose_name=_(\"Blades\"), on_delete=models.CASCADE)\n colorblade_verbose_name = _(\"Color\")\n printedelevation_verbose_name = _(\"Printed elevation\")\n direction_verbose_name = _(\"Direction\")\n city_verbose_name = _(\"City\")\n bladecode_verbose_name = _(\"Code\")\n coordinates_verbose_name = \"{} ({})\".format(_(\"Coordinates\"), spatial_reference())\n can_duplicate = False\n\n class Meta:\n verbose_name = _(\"Blade\")\n verbose_name_plural = _(\"Blades\")\n\n @property\n def zoning_property(self):\n return self.signage\n\n @classproperty\n def geomfield(cls):\n return Topology._meta.get_field('geom')\n\n def __str__(self):\n return settings.BLADE_CODE_FORMAT.format(signagecode=self.signage.code, bladenumber=self.number)\n\n def set_topology(self, topology):\n self.topology = topology\n if not self.is_signage:\n raise ValueError(\"Expecting a signage\")\n\n @property\n def conditions_display(self):\n return \", \".join([str(c) for c in self.conditions.select_related('structure').all()])\n\n @property\n def paths(self):\n return self.signage.paths.all()\n\n @property\n def is_signage(self):\n if self.topology:\n return self.topology.kind == Signage.KIND\n return False\n\n @property\n def geom(self):\n return self.signage.geom\n\n @geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def signage_display(self):\n return '<img src=\"%simages/signage-16.png\" title=\"Signage\">' % settings.STATIC_URL\n\n @property\n def order_lines(self):\n return self.lines.order_by('number')\n\n @property\n def number_display(self):\n s = '<a data-pk=\"%s\" href=\"%s\" title=\"%s\" >%s</a>' % (self.pk, self.get_detail_url(), self, self)\n return s\n\n @property\n def name_display(self):\n s = '<a data-pk=\"%s\" href=\"%s\" title=\"%s\">%s</a>' % (self.pk,\n self.get_detail_url(),\n self,\n self)\n return s\n\n @property\n def structure(self):\n return 
self.signage.structure\n\n def same_structure(self, user):\n \"\"\" Returns True if the user is in the same structure or has\n bypass_structure permission, False otherwise. \"\"\"\n return (user.profile.structure == self.structure\n or user.is_superuser\n or user.has_perm('authent.can_bypass_structure'))\n\n @property\n def bladecode_csv_display(self):\n return settings.BLADE_CODE_FORMAT.format(signagecode=self.signage.code,\n bladenumber=self.number)\n\n @property\n def coordinates_csv_display(self):\n return self.coordinates or \"\"\n\n @property\n def printedelevation_csv_display(self):\n return self.signage.printed_elevation or \"\"\n\n @property\n def city_csv_display(self):\n return self.signage.cities[0] if self.signage.cities else \"\"\n\n @property\n def coordinates(self):\n return format_coordinates(self.geom)\n\n def distance(self, to_cls):\n \"\"\"Distance to associate this blade to another class\"\"\"\n return settings.TREK_SIGNAGE_INTERSECTION_MARGIN\n\n\n@receiver(pre_delete, sender=Topology)\ndef log_cascade_deletion_from_blade_topology(sender, instance, using, **kwargs):\n # Blade are deleted when Topology are deleted\n log_cascade_deletion(sender, instance, Blade, 'topology')\n\n\n@receiver(pre_delete, sender=Signage)\ndef log_cascade_deletion_from_blade_signage(sender, instance, using, **kwargs):\n # Blade are deleted when Signage are deleted\n log_cascade_deletion(sender, instance, Blade, 'signage')\n\n\nclass Line(models.Model):\n blade = models.ForeignKey(Blade, related_name='lines', verbose_name=_(\"Blade\"),\n on_delete=models.CASCADE)\n number = models.IntegerField(verbose_name=_(\"Number\"))\n direction = models.ForeignKey(Direction, verbose_name=_(\"Direction\"), on_delete=models.PROTECT, null=True,\n blank=True)\n text = models.CharField(verbose_name=_(\"Text\"), max_length=1000, blank=True, default=\"\")\n distance = models.DecimalField(verbose_name=_(\"Distance\"), null=True, blank=True,\n decimal_places=1, max_digits=8, help_text='km')\n pictograms = models.ManyToManyField('LinePictogram', related_name=\"lines\",\n blank=True,\n verbose_name=_(\"Pictograms\"))\n time = models.DurationField(verbose_name=pgettext_lazy(\"duration\", \"Time\"), null=True, blank=True,\n help_text=_(\"Hours:Minutes:Seconds\"))\n distance_pretty_verbose_name = _(\"Distance\")\n time_pretty_verbose_name = _(\"Time\")\n linecode_verbose_name = _(\"Code\")\n\n def __str__(self):\n return self.linecode\n\n @property\n def linecode(self):\n return settings.LINE_CODE_FORMAT.format(signagecode=self.blade.signage.code,\n bladenumber=self.blade.number,\n linenumber=self.number)\n\n @property\n def distance_pretty(self):\n if not self.distance:\n return \"\"\n return settings.LINE_DISTANCE_FORMAT.format(self.distance)\n\n @property\n def time_pretty(self):\n if not self.time:\n return \"\"\n hours = self.time.seconds // 3600\n minutes = (self.time.seconds % 3600) // 60\n seconds = self.time.seconds % 60\n return settings.LINE_TIME_FORMAT.format(hours=hours, minutes=minutes, seconds=seconds)\n\n class Meta:\n unique_together = (('blade', 'number'), )\n verbose_name = _(\"Line\")\n verbose_name_plural = _(\"Lines\")\n\n\n@receiver(pre_delete, sender=Blade)\ndef log_cascade_deletion_from_line_blade(sender, instance, using, **kwargs):\n # Lines are deleted when Blade are deleted\n log_cascade_deletion(sender, instance, Line, 'blade')\n", "path": "geotrek/signage/models.py" } ]
diff --git a/docs/changelog.rst b/docs/changelog.rst index 5a4ffb1f4c..610de8fb35 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -9,6 +9,11 @@ CHANGELOG - Add git to Dockerfile build staging +**Bug fixes** + +- Fix bug deleted blades still displayed on detail view of signages (fix #4003) + + 2.103.1 (2024-03-15) -------------------- diff --git a/geotrek/signage/models.py b/geotrek/signage/models.py index ecb34a358b..e2aa7c2e10 100755 --- a/geotrek/signage/models.py +++ b/geotrek/signage/models.py @@ -137,7 +137,7 @@ def tourism_signages(cls, tourism_obj, queryset=None): @property def order_blades(self): - return self.blade_set.all().order_by(collate_c('number')) + return self.blade_set.existing().order_by(collate_c('number')) @property def coordinates(self):
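The one-line fix swaps `.all()` for `.existing()` on the signage's related blades. `Blade` inherits `NoDeleteMixin` and `Signage.delete()` only flags related blades with `deleted=True`, so a logically deleted blade keeps its row and `.all()` keeps returning it, while `.existing()` filters it out. Below is a simplified, hypothetical Django sketch of that soft-delete pattern — not Geotrek's actual `NoDeleteMixin` implementation:

```python
# Simplified sketch of the soft-delete pattern; model and app label are hypothetical.
from django.db import models


class NoDeleteQuerySet(models.QuerySet):
    def existing(self):
        # Hide logically deleted rows; .all() would still return them.
        return self.filter(deleted=False)


class SketchBlade(models.Model):
    number = models.CharField(max_length=250)
    deleted = models.BooleanField(default=False)

    objects = NoDeleteQuerySet.as_manager()

    class Meta:
        app_label = "sketch"


# SketchBlade.objects.all()      -> still includes soft-deleted blades (the reported bug)
# SketchBlade.objects.existing() -> only blades with deleted=False (what the fix uses)
```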
nltk__nltk-2895
Link to book in python documentation wrong Not sure if this is a bug in the documentation or in the DNS/web server setup. The python documentation for nltk says: ``` Steven Bird, Ewan Klein, and Edward Loper (2009). Natural Language Processing with Python. O'Reilly Media Inc. http://nltk.org/book ``` but this link does not work, `https://www.nltk.org/book/` does.
[ { "content": "# Natural Language Toolkit (NLTK)\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Authors: Steven Bird <[email protected]>\n# Edward Loper <[email protected]>\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nThe Natural Language Toolkit (NLTK) is an open source Python library\nfor Natural Language Processing. A free online book is available.\n(If you use the library for academic research, please cite the book.)\n\nSteven Bird, Ewan Klein, and Edward Loper (2009).\nNatural Language Processing with Python. O'Reilly Media Inc.\nhttps://www.nltk.org/book\n\nisort:skip_file\n\"\"\"\n\nimport os\n\n# //////////////////////////////////////////////////////\n# Metadata\n# //////////////////////////////////////////////////////\n\n# Version. For each new release, the version number should be updated\n# in the file VERSION.\ntry:\n # If a VERSION file exists, use it!\n version_file = os.path.join(os.path.dirname(__file__), \"VERSION\")\n with open(version_file) as infile:\n __version__ = infile.read().strip()\nexcept NameError:\n __version__ = \"unknown (running code interactively?)\"\nexcept OSError as ex:\n __version__ = \"unknown (%s)\" % ex\n\nif __doc__ is not None: # fix for the ``python -OO``\n __doc__ += \"\\n@version: \" + __version__\n\n\n# Copyright notice\n__copyright__ = \"\"\"\\\nCopyright (C) 2001-2021 NLTK Project.\n\nDistributed and Licensed under the Apache License, Version 2.0,\nwhich is included by reference.\n\"\"\"\n\n__license__ = \"Apache License, Version 2.0\"\n# Description of the toolkit, keywords, and the project's primary URL.\n__longdescr__ = \"\"\"\\\nThe Natural Language Toolkit (NLTK) is a Python package for\nnatural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.\"\"\"\n__keywords__ = [\n \"NLP\",\n \"CL\",\n \"natural language processing\",\n \"computational linguistics\",\n \"parsing\",\n \"tagging\",\n \"tokenizing\",\n \"syntax\",\n \"linguistics\",\n \"language\",\n \"natural language\",\n \"text analytics\",\n]\n__url__ = \"https://www.nltk.org/\"\n\n# Maintainer, contributors, etc.\n__maintainer__ = \"NLTK Team\"\n__maintainer_email__ = \"[email protected]\"\n__author__ = __maintainer__\n__author_email__ = __maintainer_email__\n\n# \"Trove\" classifiers for Python Package Index.\n__classifiers__ = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Human Machine Interfaces\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Text Processing\",\n \"Topic :: Text Processing :: Filters\",\n \"Topic :: Text Processing :: General\",\n \"Topic :: Text Processing :: Indexing\",\n \"Topic :: Text Processing :: Linguistic\",\n]\n\nfrom nltk.internals import config_java\n\n# support numpy from pypy\ntry:\n import numpypy\nexcept ImportError:\n pass\n\n# Override missing methods on environments where it cannot be used like GAE.\nimport subprocess\n\nif not hasattr(subprocess, \"PIPE\"):\n\n 
def _fake_PIPE(*args, **kwargs):\n raise NotImplementedError(\"subprocess.PIPE is not supported.\")\n\n subprocess.PIPE = _fake_PIPE\nif not hasattr(subprocess, \"Popen\"):\n\n def _fake_Popen(*args, **kwargs):\n raise NotImplementedError(\"subprocess.Popen is not supported.\")\n\n subprocess.Popen = _fake_Popen\n\n###########################################################\n# TOP-LEVEL MODULES\n###########################################################\n\n# Import top-level functionality into top-level namespace\n\nfrom nltk.collocations import *\nfrom nltk.decorators import decorator, memoize\nfrom nltk.featstruct import *\nfrom nltk.grammar import *\nfrom nltk.probability import *\nfrom nltk.text import *\nfrom nltk.util import *\nfrom nltk.jsontags import *\n\n###########################################################\n# PACKAGES\n###########################################################\n\nfrom nltk.chunk import *\nfrom nltk.classify import *\nfrom nltk.inference import *\nfrom nltk.metrics import *\nfrom nltk.parse import *\nfrom nltk.tag import *\nfrom nltk.tokenize import *\nfrom nltk.translate import *\nfrom nltk.tree import *\nfrom nltk.sem import *\nfrom nltk.stem import *\n\n# Packages which can be lazily imported\n# (a) we don't import *\n# (b) they're slow to import or have run-time dependencies\n# that can safely fail at run time\n\nfrom nltk import lazyimport\n\napp = lazyimport.LazyModule(\"nltk.app\", locals(), globals())\nchat = lazyimport.LazyModule(\"nltk.chat\", locals(), globals())\ncorpus = lazyimport.LazyModule(\"nltk.corpus\", locals(), globals())\ndraw = lazyimport.LazyModule(\"nltk.draw\", locals(), globals())\ntoolbox = lazyimport.LazyModule(\"nltk.toolbox\", locals(), globals())\n\n# Optional loading\n\ntry:\n import numpy\nexcept ImportError:\n pass\nelse:\n from nltk import cluster\n\nfrom nltk.downloader import download, download_shell\n\ntry:\n import tkinter\nexcept ImportError:\n pass\nelse:\n try:\n from nltk.downloader import download_gui\n except RuntimeError as e:\n import warnings\n\n warnings.warn(\n \"Corpus downloader GUI not loaded \"\n \"(RuntimeError during import: %s)\" % str(e)\n )\n\n# explicitly import all top-level modules (ensuring\n# they override the same names inadvertently imported\n# from a subpackage)\n\nfrom nltk import ccg, chunk, classify, collocations\nfrom nltk import data, featstruct, grammar, help, inference, metrics\nfrom nltk import misc, parse, probability, sem, stem, wsd\nfrom nltk import tag, tbl, text, tokenize, translate, tree, util\n\n\n# FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116\ndef demo():\n print(\"To run the demo code for a module, type nltk.module.demo()\")\n", "path": "nltk/__init__.py" } ]
[ { "content": "# Natural Language Toolkit (NLTK)\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Authors: Steven Bird <[email protected]>\n# Edward Loper <[email protected]>\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nThe Natural Language Toolkit (NLTK) is an open source Python library\nfor Natural Language Processing. A free online book is available.\n(If you use the library for academic research, please cite the book.)\n\nSteven Bird, Ewan Klein, and Edward Loper (2009).\nNatural Language Processing with Python. O'Reilly Media Inc.\nhttps://www.nltk.org/book/\n\nisort:skip_file\n\"\"\"\n\nimport os\n\n# //////////////////////////////////////////////////////\n# Metadata\n# //////////////////////////////////////////////////////\n\n# Version. For each new release, the version number should be updated\n# in the file VERSION.\ntry:\n # If a VERSION file exists, use it!\n version_file = os.path.join(os.path.dirname(__file__), \"VERSION\")\n with open(version_file) as infile:\n __version__ = infile.read().strip()\nexcept NameError:\n __version__ = \"unknown (running code interactively?)\"\nexcept OSError as ex:\n __version__ = \"unknown (%s)\" % ex\n\nif __doc__ is not None: # fix for the ``python -OO``\n __doc__ += \"\\n@version: \" + __version__\n\n\n# Copyright notice\n__copyright__ = \"\"\"\\\nCopyright (C) 2001-2021 NLTK Project.\n\nDistributed and Licensed under the Apache License, Version 2.0,\nwhich is included by reference.\n\"\"\"\n\n__license__ = \"Apache License, Version 2.0\"\n# Description of the toolkit, keywords, and the project's primary URL.\n__longdescr__ = \"\"\"\\\nThe Natural Language Toolkit (NLTK) is a Python package for\nnatural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.\"\"\"\n__keywords__ = [\n \"NLP\",\n \"CL\",\n \"natural language processing\",\n \"computational linguistics\",\n \"parsing\",\n \"tagging\",\n \"tokenizing\",\n \"syntax\",\n \"linguistics\",\n \"language\",\n \"natural language\",\n \"text analytics\",\n]\n__url__ = \"https://www.nltk.org/\"\n\n# Maintainer, contributors, etc.\n__maintainer__ = \"NLTK Team\"\n__maintainer_email__ = \"[email protected]\"\n__author__ = __maintainer__\n__author_email__ = __maintainer_email__\n\n# \"Trove\" classifiers for Python Package Index.\n__classifiers__ = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Human Machine Interfaces\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Text Processing\",\n \"Topic :: Text Processing :: Filters\",\n \"Topic :: Text Processing :: General\",\n \"Topic :: Text Processing :: Indexing\",\n \"Topic :: Text Processing :: Linguistic\",\n]\n\nfrom nltk.internals import config_java\n\n# support numpy from pypy\ntry:\n import numpypy\nexcept ImportError:\n pass\n\n# Override missing methods on environments where it cannot be used like GAE.\nimport subprocess\n\nif not hasattr(subprocess, \"PIPE\"):\n\n 
def _fake_PIPE(*args, **kwargs):\n raise NotImplementedError(\"subprocess.PIPE is not supported.\")\n\n subprocess.PIPE = _fake_PIPE\nif not hasattr(subprocess, \"Popen\"):\n\n def _fake_Popen(*args, **kwargs):\n raise NotImplementedError(\"subprocess.Popen is not supported.\")\n\n subprocess.Popen = _fake_Popen\n\n###########################################################\n# TOP-LEVEL MODULES\n###########################################################\n\n# Import top-level functionality into top-level namespace\n\nfrom nltk.collocations import *\nfrom nltk.decorators import decorator, memoize\nfrom nltk.featstruct import *\nfrom nltk.grammar import *\nfrom nltk.probability import *\nfrom nltk.text import *\nfrom nltk.util import *\nfrom nltk.jsontags import *\n\n###########################################################\n# PACKAGES\n###########################################################\n\nfrom nltk.chunk import *\nfrom nltk.classify import *\nfrom nltk.inference import *\nfrom nltk.metrics import *\nfrom nltk.parse import *\nfrom nltk.tag import *\nfrom nltk.tokenize import *\nfrom nltk.translate import *\nfrom nltk.tree import *\nfrom nltk.sem import *\nfrom nltk.stem import *\n\n# Packages which can be lazily imported\n# (a) we don't import *\n# (b) they're slow to import or have run-time dependencies\n# that can safely fail at run time\n\nfrom nltk import lazyimport\n\napp = lazyimport.LazyModule(\"nltk.app\", locals(), globals())\nchat = lazyimport.LazyModule(\"nltk.chat\", locals(), globals())\ncorpus = lazyimport.LazyModule(\"nltk.corpus\", locals(), globals())\ndraw = lazyimport.LazyModule(\"nltk.draw\", locals(), globals())\ntoolbox = lazyimport.LazyModule(\"nltk.toolbox\", locals(), globals())\n\n# Optional loading\n\ntry:\n import numpy\nexcept ImportError:\n pass\nelse:\n from nltk import cluster\n\nfrom nltk.downloader import download, download_shell\n\ntry:\n import tkinter\nexcept ImportError:\n pass\nelse:\n try:\n from nltk.downloader import download_gui\n except RuntimeError as e:\n import warnings\n\n warnings.warn(\n \"Corpus downloader GUI not loaded \"\n \"(RuntimeError during import: %s)\" % str(e)\n )\n\n# explicitly import all top-level modules (ensuring\n# they override the same names inadvertently imported\n# from a subpackage)\n\nfrom nltk import ccg, chunk, classify, collocations\nfrom nltk import data, featstruct, grammar, help, inference, metrics\nfrom nltk import misc, parse, probability, sem, stem, wsd\nfrom nltk import tag, tbl, text, tokenize, translate, tree, util\n\n\n# FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116\ndef demo():\n print(\"To run the demo code for a module, type nltk.module.demo()\")\n", "path": "nltk/__init__.py" } ]
diff --git a/nltk/__init__.py b/nltk/__init__.py index a96ac22b25..9573e73791 100644 --- a/nltk/__init__.py +++ b/nltk/__init__.py @@ -13,7 +13,7 @@ Steven Bird, Ewan Klein, and Edward Loper (2009). Natural Language Processing with Python. O'Reilly Media Inc. -https://www.nltk.org/book +https://www.nltk.org/book/ isort:skip_file """ diff --git a/nltk/test/portuguese_en.doctest b/nltk/test/portuguese_en.doctest index e490d4cee5..41c0da1b31 100644 --- a/nltk/test/portuguese_en.doctest +++ b/nltk/test/portuguese_en.doctest @@ -7,7 +7,7 @@ Examples for Portuguese Processing This HOWTO contains a variety of examples relating to the Portuguese language. It is intended to be read in conjunction with the NLTK book -(``https://www.nltk.org/book``). For instructions on running the Python +(``https://www.nltk.org/book/``). For instructions on running the Python interpreter, please see the section *Getting Started with Python*, in Chapter 1. -------------------------------------------- diff --git a/web/index.rst b/web/index.rst index 4481c553c8..ed55f1be14 100644 --- a/web/index.rst +++ b/web/index.rst @@ -15,7 +15,7 @@ NLTK is available for Windows, Mac OS X, and Linux. Best of all, NLTK is a free, NLTK has been called "a wonderful tool for teaching, and working in, computational linguistics using Python," and "an amazing library to play with natural language." -`Natural Language Processing with Python <https://www.nltk.org/book>`_ provides a practical +`Natural Language Processing with Python <https://www.nltk.org/book/>`_ provides a practical introduction to programming for language processing. Written by the creators of NLTK, it guides the reader through the fundamentals of writing Python programs, working with corpora, categorizing text, analyzing linguistic structure, diff --git a/web/news.rst b/web/news.rst index 5acc8d8d43..d72db6d0cb 100644 --- a/web/news.rst +++ b/web/news.rst @@ -172,7 +172,7 @@ NLTK 3.0.0b2 released: August 2014 Minor bugfixes and clean-ups. NLTK Book Updates: July 2014 - The NLTK book is being updated for Python 3 and NLTK 3 `here <https://www.nltk.org/book>`__. + The NLTK book is being updated for Python 3 and NLTK 3 `here <https://www.nltk.org/book/>`__. The original Python 2 edition is still available `here <https://www.nltk.org/book_1ed>`__. NLTK 3.0.0b1 released: July 2014
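The patch adds the trailing slash in four places: the package docstring, a doctest header, and two pages of the project website. A hypothetical end-to-end check of the two URL forms is sketched below (network access assumed; the observed behaviour depends on the current nltk.org server configuration, so treat the expected output as illustrative):

```python
# Hypothetical link check mirroring the report: request both URL forms and
# print either the HTTP status or the raised error for each.
import urllib.request

for url in ("https://www.nltk.org/book", "https://www.nltk.org/book/"):
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            print(url, "->", resp.status)
    except Exception as exc:  # e.g. an HTTPError 404 for the form the report calls broken
        print(url, "->", exc)
```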
Textualize__rich-2108
[BUG] Rich's IPython extension doesn't work **Describe the bug** When trying to use `%load_ext rich` in **IPython** on Terminal it says following: ``` %Python 3.10.3 (main, Mar 17 2022, 04:46:20) [Clang 12.0.8 (https://android.googlesource.com/toolchain/llvm-project c935d99d7 Type 'copyright', 'credits' or 'license' for more information IPython 8.1.1 -- An enhanced Interactive Python. Type '?' for help. In [1]: %load_ext rich The rich module is not an IPython extension. ``` **Platform** <details> <summary>Click to expand</summary> What platform (Win/Linux/Mac) are you running on? What terminal software are you using? I may ask you to copy and paste the output of the following commands. It may save some time if you do it now. If you're using Rich in a terminal: ``` python -m rich.diagnose pip freeze | grep rich ``` If you're using Rich in a Jupyter Notebook, run the following snippet in a cell and paste the output in your bug report. ```python from rich.diagnose import report report() ``` </details> ``` ❯ python -m rich.diagnose pip freeze | grep rich ╭────────────────── <class 'rich.console.Console'> ──────────────────╮ │ A high level console interface. │ │ │ │ ╭────────────────────────────────────────────────────────────────╮ │ │ │ <console width=70 ColorSystem.TRUECOLOR> │ │ │ ╰────────────────────────────────────────────────────────────────╯ │ │ │ │ color_system = 'truecolor' │ │ encoding = 'utf-8' │ │ file = <_io.TextIOWrapper name='<stdout>' mode='w' │ │ encoding='utf-8'> │ │ height = 45 │ │ is_alt_screen = False │ │ is_dumb_terminal = False │ │ is_interactive = True │ │ is_jupyter = False │ │ is_terminal = True │ │ legacy_windows = False │ │ no_color = False │ │ options = ConsoleOptions( │ │ size=ConsoleDimensions( │ │ width=70, │ │ height=45 │ │ ), │ │ legacy_windows=False, │ │ min_width=1, │ │ max_width=70, │ │ is_terminal=True, │ │ encoding='utf-8', │ │ max_height=45, │ │ justify=None, │ │ overflow=None, │ │ no_wrap=False, │ │ highlight=None, │ │ markup=None, │ │ height=None │ │ ) │ │ quiet = False │ │ record = False │ │ safe_box = True │ │ size = ConsoleDimensions(width=70, height=45) │ │ soft_wrap = False │ │ stderr = False │ │ style = None │ │ tab_size = 8 │ │ width = 70 │ ╰────────────────────────────────────────────────────────────────────╯ ╭─── <class 'rich._windows.WindowsConsoleFeatures'> ────╮ │ Windows features available. │ │ │ │ ╭───────────────────────────────────────────────────╮ │ │ │ WindowsConsoleFeatures(vt=False, truecolor=False) │ │ │ ╰───────────────────────────────────────────────────╯ │ │ │ │ truecolor = False │ │ vt = False │ ╰───────────────────────────────────────────────────────╯ ╭────── Environment Variables ───────╮ │ { │ │ 'TERM': 'xterm-256color', │ │ 'COLORTERM': 'truecolor', │ │ 'CLICOLOR': None, │ │ 'NO_COLOR': None, │ │ 'TERM_PROGRAM': None, │ │ 'COLUMNS': None, │ │ 'LINES': None, │ │ 'JPY_PARENT_PID': None, │ │ 'VSCODE_VERBOSE_LOGGING': None │ │ } │ ╰────────────────────────────────────╯ platform="Linux" rich @ file:///storage/emulated/0/Projects/rich ``` [![asciicast](https://asciinema.org/a/Xd3qDv897tjdEll0csW5XZk0T.svg)](https://asciinema.org/a/Xd3qDv897tjdEll0csW5XZk0T)
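For readers unfamiliar with IPython's extension mechanism: `%load_ext rich` imports the `rich` package and looks for a module-level `load_ipython_extension` callable, and the error shown above is IPython's standard message when that attribute is missing. A minimal sketch of the protocol, using a hypothetical package name:

```python
# mypkg/__init__.py -- hypothetical package showing the contract %load_ext relies on.
# IPython imports the named module and calls its module-level
# load_ipython_extension(ipython); if the attribute is missing, it prints the
# "... is not an IPython extension" message seen in the report above.

def load_ipython_extension(ipython):
    """Called by `%load_ext mypkg`; register magics or display formatters on the shell here."""
    print("loaded into", type(ipython).__name__)


def unload_ipython_extension(ipython):
    """Optional counterpart used by `%unload_ext mypkg`."""
    print("unloaded")
```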
[ { "content": "\"\"\"Rich text and beautiful formatting in the terminal.\"\"\"\n\nimport os\nfrom typing import Callable, IO, TYPE_CHECKING, Any, Optional, Union\n\n\n__all__ = [\"get_console\", \"reconfigure\", \"print\", \"inspect\"]\n\nif TYPE_CHECKING:\n from .console import Console\n\n# Global console used by alternative print\n_console: Optional[\"Console\"] = None\n\n_IMPORT_CWD = os.path.abspath(os.getcwd())\n\n\ndef get_console() -> \"Console\":\n \"\"\"Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,\n and hasn't been explicitly given one.\n\n Returns:\n Console: A console instance.\n \"\"\"\n global _console\n if _console is None:\n from .console import Console\n\n _console = Console()\n\n return _console\n\n\ndef reconfigure(*args: Any, **kwargs: Any) -> None:\n \"\"\"Reconfigures the global console by replacing it with another.\n\n Args:\n console (Console): Replacement console instance.\n \"\"\"\n from rich.console import Console\n\n new_console = Console(*args, **kwargs)\n _console = get_console()\n _console.__dict__ = new_console.__dict__\n\n\ndef print(\n *objects: Any,\n sep: str = \" \",\n end: str = \"\\n\",\n file: Optional[IO[str]] = None,\n flush: bool = False,\n) -> None:\n r\"\"\"Print object(s) supplied via positional arguments.\n This function has an identical signature to the built-in print.\n For more advanced features, see the :class:`~rich.console.Console` class.\n\n Args:\n sep (str, optional): Separator between printed objects. Defaults to \" \".\n end (str, optional): Character to write at end of output. Defaults to \"\\\\n\".\n file (IO[str], optional): File to write to, or None for stdout. Defaults to None.\n flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.\n\n \"\"\"\n from .console import Console\n\n write_console = get_console() if file is None else Console(file=file)\n return write_console.print(*objects, sep=sep, end=end)\n\n\ndef print_json(\n json: Optional[str] = None,\n *,\n data: Any = None,\n indent: Union[None, int, str] = 2,\n highlight: bool = True,\n skip_keys: bool = False,\n ensure_ascii: bool = True,\n check_circular: bool = True,\n allow_nan: bool = True,\n default: Optional[Callable[[Any], Any]] = None,\n sort_keys: bool = False,\n) -> None:\n \"\"\"Pretty prints JSON. Output will be valid JSON.\n\n Args:\n json (str): A string containing JSON.\n data (Any): If json is not supplied, then encode this data.\n indent (int, optional): Number of spaces to indent. Defaults to 2.\n highlight (bool, optional): Enable highlighting of output: Defaults to True.\n skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.\n ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.\n check_circular (bool, optional): Check for circular references. Defaults to True.\n allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.\n default (Callable, optional): A callable that converts values that can not be encoded\n in to something that can be JSON encoded. Defaults to None.\n sort_keys (bool, optional): Sort dictionary keys. 
Defaults to False.\n \"\"\"\n\n get_console().print_json(\n json,\n data=data,\n indent=indent,\n highlight=highlight,\n skip_keys=skip_keys,\n ensure_ascii=ensure_ascii,\n check_circular=check_circular,\n allow_nan=allow_nan,\n default=default,\n sort_keys=sort_keys,\n )\n\n\ndef inspect(\n obj: Any,\n *,\n console: Optional[\"Console\"] = None,\n title: Optional[str] = None,\n help: bool = False,\n methods: bool = False,\n docs: bool = True,\n private: bool = False,\n dunder: bool = False,\n sort: bool = True,\n all: bool = False,\n value: bool = True,\n) -> None:\n \"\"\"Inspect any Python object.\n\n * inspect(<OBJECT>) to see summarized info.\n * inspect(<OBJECT>, methods=True) to see methods.\n * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.\n * inspect(<OBJECT>, private=True) to see private attributes (single underscore).\n * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.\n * inspect(<OBJECT>, all=True) to see all attributes.\n\n Args:\n obj (Any): An object to inspect.\n title (str, optional): Title to display over inspect result, or None use type. Defaults to None.\n help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.\n methods (bool, optional): Enable inspection of callables. Defaults to False.\n docs (bool, optional): Also render doc strings. Defaults to True.\n private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.\n dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.\n sort (bool, optional): Sort attributes alphabetically. Defaults to True.\n all (bool, optional): Show all attributes. Defaults to False.\n value (bool, optional): Pretty print value. Defaults to True.\n \"\"\"\n _console = console or get_console()\n from rich._inspect import Inspect\n\n # Special case for inspect(inspect)\n is_inspect = obj is inspect\n\n _inspect = Inspect(\n obj,\n title=title,\n help=is_inspect or help,\n methods=is_inspect or methods,\n docs=is_inspect or docs,\n private=private,\n dunder=dunder,\n sort=sort,\n all=all,\n value=value,\n )\n _console.print(_inspect)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n print(\"Hello, **World**\")\n", "path": "rich/__init__.py" } ]
[ { "content": "\"\"\"Rich text and beautiful formatting in the terminal.\"\"\"\n\nimport os\nfrom typing import Callable, IO, TYPE_CHECKING, Any, Optional, Union\n\nfrom ._extension import load_ipython_extension\n\n__all__ = [\"get_console\", \"reconfigure\", \"print\", \"inspect\"]\n\nif TYPE_CHECKING:\n from .console import Console\n\n# Global console used by alternative print\n_console: Optional[\"Console\"] = None\n\n_IMPORT_CWD = os.path.abspath(os.getcwd())\n\n\ndef get_console() -> \"Console\":\n \"\"\"Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,\n and hasn't been explicitly given one.\n\n Returns:\n Console: A console instance.\n \"\"\"\n global _console\n if _console is None:\n from .console import Console\n\n _console = Console()\n\n return _console\n\n\ndef reconfigure(*args: Any, **kwargs: Any) -> None:\n \"\"\"Reconfigures the global console by replacing it with another.\n\n Args:\n console (Console): Replacement console instance.\n \"\"\"\n from rich.console import Console\n\n new_console = Console(*args, **kwargs)\n _console = get_console()\n _console.__dict__ = new_console.__dict__\n\n\ndef print(\n *objects: Any,\n sep: str = \" \",\n end: str = \"\\n\",\n file: Optional[IO[str]] = None,\n flush: bool = False,\n) -> None:\n r\"\"\"Print object(s) supplied via positional arguments.\n This function has an identical signature to the built-in print.\n For more advanced features, see the :class:`~rich.console.Console` class.\n\n Args:\n sep (str, optional): Separator between printed objects. Defaults to \" \".\n end (str, optional): Character to write at end of output. Defaults to \"\\\\n\".\n file (IO[str], optional): File to write to, or None for stdout. Defaults to None.\n flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.\n\n \"\"\"\n from .console import Console\n\n write_console = get_console() if file is None else Console(file=file)\n return write_console.print(*objects, sep=sep, end=end)\n\n\ndef print_json(\n json: Optional[str] = None,\n *,\n data: Any = None,\n indent: Union[None, int, str] = 2,\n highlight: bool = True,\n skip_keys: bool = False,\n ensure_ascii: bool = True,\n check_circular: bool = True,\n allow_nan: bool = True,\n default: Optional[Callable[[Any], Any]] = None,\n sort_keys: bool = False,\n) -> None:\n \"\"\"Pretty prints JSON. Output will be valid JSON.\n\n Args:\n json (str): A string containing JSON.\n data (Any): If json is not supplied, then encode this data.\n indent (int, optional): Number of spaces to indent. Defaults to 2.\n highlight (bool, optional): Enable highlighting of output: Defaults to True.\n skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.\n ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.\n check_circular (bool, optional): Check for circular references. Defaults to True.\n allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.\n default (Callable, optional): A callable that converts values that can not be encoded\n in to something that can be JSON encoded. Defaults to None.\n sort_keys (bool, optional): Sort dictionary keys. 
Defaults to False.\n \"\"\"\n\n get_console().print_json(\n json,\n data=data,\n indent=indent,\n highlight=highlight,\n skip_keys=skip_keys,\n ensure_ascii=ensure_ascii,\n check_circular=check_circular,\n allow_nan=allow_nan,\n default=default,\n sort_keys=sort_keys,\n )\n\n\ndef inspect(\n obj: Any,\n *,\n console: Optional[\"Console\"] = None,\n title: Optional[str] = None,\n help: bool = False,\n methods: bool = False,\n docs: bool = True,\n private: bool = False,\n dunder: bool = False,\n sort: bool = True,\n all: bool = False,\n value: bool = True,\n) -> None:\n \"\"\"Inspect any Python object.\n\n * inspect(<OBJECT>) to see summarized info.\n * inspect(<OBJECT>, methods=True) to see methods.\n * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.\n * inspect(<OBJECT>, private=True) to see private attributes (single underscore).\n * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.\n * inspect(<OBJECT>, all=True) to see all attributes.\n\n Args:\n obj (Any): An object to inspect.\n title (str, optional): Title to display over inspect result, or None use type. Defaults to None.\n help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.\n methods (bool, optional): Enable inspection of callables. Defaults to False.\n docs (bool, optional): Also render doc strings. Defaults to True.\n private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.\n dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.\n sort (bool, optional): Sort attributes alphabetically. Defaults to True.\n all (bool, optional): Show all attributes. Defaults to False.\n value (bool, optional): Pretty print value. Defaults to True.\n \"\"\"\n _console = console or get_console()\n from rich._inspect import Inspect\n\n # Special case for inspect(inspect)\n is_inspect = obj is inspect\n\n _inspect = Inspect(\n obj,\n title=title,\n help=is_inspect or help,\n methods=is_inspect or methods,\n docs=is_inspect or docs,\n private=private,\n dunder=dunder,\n sort=sort,\n all=all,\n value=value,\n )\n _console.print(_inspect)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n print(\"Hello, **World**\")\n", "path": "rich/__init__.py" } ]
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 140f77f43..3dc31bf80 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -19,6 +19,7 @@ The following people have contributed to the development of Rich: - [Josh Karpel](https://github.com/JoshKarpel) - [Andrew Kettmann](https://github.com/akettmann) - [Hedy Li](https://github.com/hedythedev) +- [Luka Mamukashvili](https://github.com/UltraStudioLTD) - [Alexander Mancevice](https://github.com/amancevice) - [Will McGugan](https://github.com/willmcgugan) - [Paul McGuire](https://github.com/ptmcg) diff --git a/rich/__init__.py b/rich/__init__.py index 2b2a29f15..01faa6e6b 100644 --- a/rich/__init__.py +++ b/rich/__init__.py @@ -3,6 +3,7 @@ import os from typing import Callable, IO, TYPE_CHECKING, Any, Optional, Union +from ._extension import load_ipython_extension __all__ = ["get_console", "reconfigure", "print", "inspect"]
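The fix itself is the single added import: re-exporting `load_ipython_extension` from the internal `rich._extension` module makes it an attribute of the top-level `rich` package, which is exactly what `%load_ext rich` probes for. A quick sanity check, assuming a build of rich that contains the change:

```python
# Should print True on a rich build that includes the re-export added above.
import rich

print(hasattr(rich, "load_ipython_extension"))
```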
django-hijack__django-hijack-429
hijack.min.css in pip package is invalid. The hijack.min.css file included in the pip package is invalid: it appears to be simply the source SCSS file with whitespace removed. For example, the first statement is
```
$system-font: '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"';
```
which is not valid CSS. This is a regression introduced by commit 561be87a14f158971773fa9d3d826be2325bb3d3. As far as I can tell, nothing changed in the buildchain, but prior to that commit, hijack.scss contained only valid CSS.
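Since browsers reject SCSS-only syntax such as `$name:` variable declarations, scanning the shipped stylesheet for them is enough to reproduce the report. A rough sketch follows, with an illustrative path that you would point at the `hijack/static/hijack/hijack.min.css` inside your own site-packages copy of django-hijack:

```python
# Rough sanity check: flag SCSS variable declarations left in the packaged "CSS".
import re
from pathlib import Path

# Illustrative path; adjust to the installed location of django-hijack's static files.
css_path = Path("hijack/static/hijack/hijack.min.css")
text = css_path.read_text(encoding="utf-8")

scss_leftovers = re.findall(r"\$[\w-]+\s*:", text)  # e.g. "$system-font:" from the report
print("SCSS leftovers found:", scss_leftovers[:5] or "none")
```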
[ { "content": "import django\nfrom django import forms\nfrom django.shortcuts import resolve_url\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext_lazy as _\n\nfrom hijack.conf import settings\n\n\nclass HijackUserAdminMixin:\n \"\"\"Add hijack button to changelist admin view.\"\"\"\n\n hijack_success_url = None\n \"\"\"Return URL to which one will be forwarded to after hijacking another user.\"\"\"\n\n @property\n def media(self):\n return super().media + forms.Media(js=[\"hijack/hijack.js\"])\n\n def get_hijack_user(self, obj):\n \"\"\"\n Return the user based on the current object.\n\n This method may be overridden to support hijack keys on related objects.\n \"\"\"\n return obj\n\n def get_hijack_success_url(self, request, obj):\n \"\"\"Return URL to which one will be forwarded to after hijacking another user.\"\"\"\n success_url = settings.LOGIN_REDIRECT_URL\n if self.hijack_success_url:\n success_url = self.hijack_success_url\n elif hasattr(obj, \"get_absolute_url\"):\n success_url = obj\n return resolve_url(success_url)\n\n def hijack_button(self, request, obj):\n \"\"\"\n Render hijack button.\n\n Should the user only be a related object we include the username in the button\n to ensure deliberate action. However, the name is omitted in the user admin,\n as the table layout suggests that the button targets the current user.\n \"\"\"\n user = self.get_hijack_user(obj)\n return render_to_string(\n \"hijack/contrib/admin/button.html\",\n {\n \"request\": request,\n \"another_user\": user,\n \"username\": str(user),\n \"is_user_admin\": self.model == type(user),\n \"next\": self.get_hijack_success_url(request, obj),\n },\n )\n\n def get_changelist_instance(self, request):\n # We inject the request for the CSRF token, see also:\n # https://code.djangoproject.com/ticket/13659\n def hijack_field(obj):\n return self.hijack_button(request, obj)\n\n hijack_field.short_description = _(\"hijack user\")\n\n # we\n list_display = [*self.get_list_display(request), hijack_field]\n # Same as super method, see also:\n # https://github.com/django/django/blob/76c0b32f826469320c59709d31e2f2126dd7c505/django/contrib/admin/options.py#L724-L750\n list_display_links = self.get_list_display_links(request, list_display)\n # Add the action checkboxes if any actions are available.\n if self.get_actions(request):\n list_display = [\"action_checkbox\", *list_display]\n sortable_by = self.get_sortable_by(request)\n ChangeList = self.get_changelist(request)\n args = [\n request,\n self.model,\n list_display,\n list_display_links,\n self.get_list_filter(request),\n self.date_hierarchy,\n self.get_search_fields(request),\n self.get_list_select_related(request),\n self.list_per_page,\n self.list_max_show_all,\n self.list_editable,\n self,\n sortable_by,\n ]\n if django.VERSION >= (4, 0):\n args.append(self.search_help_text)\n return ChangeList(*args)\n", "path": "hijack/contrib/admin/admin.py" } ]
[ { "content": "import django\nfrom django import forms\nfrom django.shortcuts import resolve_url\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext_lazy as _\n\nfrom hijack.conf import settings\n\n\nclass HijackUserAdminMixin:\n \"\"\"Add hijack button to changelist admin view.\"\"\"\n\n hijack_success_url = None\n \"\"\"Return URL to which one will be forwarded to after hijacking another user.\"\"\"\n\n @property\n def media(self):\n return super().media + forms.Media(js=[\"hijack/hijack.min.js\"])\n\n def get_hijack_user(self, obj):\n \"\"\"\n Return the user based on the current object.\n\n This method may be overridden to support hijack keys on related objects.\n \"\"\"\n return obj\n\n def get_hijack_success_url(self, request, obj):\n \"\"\"Return URL to which one will be forwarded to after hijacking another user.\"\"\"\n success_url = settings.LOGIN_REDIRECT_URL\n if self.hijack_success_url:\n success_url = self.hijack_success_url\n elif hasattr(obj, \"get_absolute_url\"):\n success_url = obj\n return resolve_url(success_url)\n\n def hijack_button(self, request, obj):\n \"\"\"\n Render hijack button.\n\n Should the user only be a related object we include the username in the button\n to ensure deliberate action. However, the name is omitted in the user admin,\n as the table layout suggests that the button targets the current user.\n \"\"\"\n user = self.get_hijack_user(obj)\n return render_to_string(\n \"hijack/contrib/admin/button.html\",\n {\n \"request\": request,\n \"another_user\": user,\n \"username\": str(user),\n \"is_user_admin\": self.model == type(user),\n \"next\": self.get_hijack_success_url(request, obj),\n },\n )\n\n def get_changelist_instance(self, request):\n # We inject the request for the CSRF token, see also:\n # https://code.djangoproject.com/ticket/13659\n def hijack_field(obj):\n return self.hijack_button(request, obj)\n\n hijack_field.short_description = _(\"hijack user\")\n\n # we\n list_display = [*self.get_list_display(request), hijack_field]\n # Same as super method, see also:\n # https://github.com/django/django/blob/76c0b32f826469320c59709d31e2f2126dd7c505/django/contrib/admin/options.py#L724-L750\n list_display_links = self.get_list_display_links(request, list_display)\n # Add the action checkboxes if any actions are available.\n if self.get_actions(request):\n list_display = [\"action_checkbox\", *list_display]\n sortable_by = self.get_sortable_by(request)\n ChangeList = self.get_changelist(request)\n args = [\n request,\n self.model,\n list_display,\n list_display_links,\n self.get_list_filter(request),\n self.date_hierarchy,\n self.get_search_fields(request),\n self.get_list_select_related(request),\n self.list_per_page,\n self.list_max_show_all,\n self.list_editable,\n self,\n sortable_by,\n ]\n if django.VERSION >= (4, 0):\n args.append(self.search_help_text)\n return ChangeList(*args)\n", "path": "hijack/contrib/admin/admin.py" } ]
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c6e0a379..a185b200 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,19 +10,6 @@ on: jobs: - analyze: - name: CodeQL - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - name: Initialize CodeQL - uses: github/codeql-action/init@v1 - with: - languages: python - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 - msgcheck: runs-on: ubuntu-latest steps: @@ -70,8 +57,8 @@ jobs: - uses: actions/[email protected] - uses: actions/checkout@v3 - name: Install Python dependencies - run: python -m pip install --upgrade pip setuptools wheel twine readme-renderer - - run: python setup.py sdist bdist_wheel + run: python -m pip install --upgrade pip build wheel twine readme-renderer + - run: python -m build --sdist --wheel - run: python -m twine check dist/* - uses: actions/upload-artifact@v2 with: @@ -85,34 +72,34 @@ jobs: - run: npm ci - run: npm run lint:scss + standardjs: + runs-on: ubuntu-latest + steps: + - uses: actions/[email protected] + - uses: actions/checkout@v3 + - run: npm ci + - run: npm run lint:js + pytest: runs-on: ubuntu-latest needs: - readme - lint - stylelint + - standardjs - msgcheck strategy: matrix: python-version: - - "3.6" - - "3.7" - "3.8" - "3.9" + - "3.10" django-version: - - ">=2.2a1,<3.0" - - ">=3.0a1,<3.1" - - ">=3.1a1,<3.2" - - ">=3.2a1,<4.0" - - ">=4.0a1,<4.1" - exclude: - - python-version: "3.6" - django-version: ">=4.0a1,<4.1" - - python-version: "3.7" - django-version: ">=4.0a1,<4.1" + - "3.2" + - "4.0" include: - - python-version: "3.10.0-alpha - 3.10" - django-version: ">=4.0a1,<4.1" + - python-version: "3.7" + django-version: "3.2" steps: - run: sudo apt install -y gettext - name: Set up Python ${{ matrix.python-version }} @@ -123,9 +110,9 @@ jobs: - uses: actions/checkout@v3 - name: Upgrade Python setuptools run: python -m pip install --upgrade pip setuptools wheel codecov + - run: python -m pip install -e .[test] - name: Install Django ${{ matrix.django-version }} - run: python -m pip install "django${{ matrix.django-version }}" - - run: python setup.py develop + run: python -m pip install django~=${{ matrix.django-version }}a - name: Run tests - run: python setup.py test + run: python -m pytest - run: codecov diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a0fb92bf..80d8139f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,9 +13,9 @@ jobs: - uses: actions/[email protected] - uses: actions/checkout@v3 - name: Install Python dependencies - run: python -m pip install --upgrade pip setuptools wheel twine + run: python -m pip install --upgrade pip build wheel twine - name: Build dist packages - run: python setup.py sdist bdist_wheel + run: python -m build --sdist --wheel - name: Upload packages run: python -m twine upload dist/* env: diff --git a/.gitignore b/.gitignore index b9c4e43d..9694b09b 100644 --- a/.gitignore +++ b/.gitignore @@ -73,5 +73,5 @@ node_modules/ *.mo # minified static files -*.min.* -*.css +*.min.js +*.min.css diff --git a/.stylelintrc.json b/.stylelintrc.json index 11d88f00..841f85f1 100644 --- a/.stylelintrc.json +++ b/.stylelintrc.json @@ -1,6 +1,9 @@ { - "extends": "stylelint-config-standard", + "extends": "stylelint-config-standard-scss", "plugins": [ "stylelint-scss" + ], + "ignoreFiles": [ + "venv/**" ] } diff --git a/MANIFEST.in b/MANIFEST.in index bd987d7c..084311a8 100644 --- a/MANIFEST.in +++ b/MANIFEST.in 
@@ -1,4 +1,4 @@ prune .github exclude .gitignore exclude requirements.txt -include hijack/locale/*/LC_MESSAGES/django.mo hijack/static/hijack/hijack.min.css hijack/static/hijack/hijack.js +include hijack/locale/*/LC_MESSAGES/django.mo hijack/static/hijack/hijack.min.* diff --git a/hijack/contrib/admin/admin.py b/hijack/contrib/admin/admin.py index fe769d56..ec37d8cb 100644 --- a/hijack/contrib/admin/admin.py +++ b/hijack/contrib/admin/admin.py @@ -15,7 +15,7 @@ class HijackUserAdminMixin: @property def media(self): - return super().media + forms.Media(js=["hijack/hijack.js"]) + return super().media + forms.Media(js=["hijack/hijack.min.js"]) def get_hijack_user(self, obj): """ diff --git a/hijack/static/hijack/hijack.scss b/hijack/static/hijack/hijack.scss index 197ba86c..2e3a83b3 100644 --- a/hijack/static/hijack/hijack.scss +++ b/hijack/static/hijack/hijack.scss @@ -1,5 +1,5 @@ -/* stylelint-disable-next-line max-line-length */ -$system-font: '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"'; +/* stylelint-disable-next-line */ +$system-font: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; .djhj { position: fixed; @@ -16,7 +16,7 @@ $system-font: '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, padding: 1rem 1rem 0; margin: 1rem; max-width: 540px; - box-shadow: 0 0 0.5em rgba(black 30%); + box-shadow: 0 0 0.5em rgba(0 0 0 / 30%); font-family: $system-font; font-size: initial; background: whitesmoke; diff --git a/package-lock.json b/package-lock.json index 4ad0ac3c..81dd0026 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,49 +10,123 @@ "license": "MIT", "devDependencies": { "autoprefixer": "^10.4.2", - "cssnano": "^5.1.0", - "postcss": "^8.4.7", - "postcss-cli": "^9.1.0", - "postcss-nested": "^5.0.6", - "sass": "^1.49.9", + "css-loader": "^6.7.1", + "mini-css-extract-plugin": "^2.6.0", + "postcss": "^8.4.12", + "postcss-loader": "^6.2.1", + "sass": "^1.49.10", + "sass-loader": "^12.6.0", + "standard": "^16.0.4", "stylelint": "^14.3.0", - "stylelint-config-standard": "^25.0.0", - "stylelint-scss": "^4.1.0" + "stylelint-config-standard-scss": "^3.0.0", + "stylelint-scss": "^4.1.0", + "webpack": "^5.70.0", + "webpack-cli": "^4.9.2" } }, "node_modules/@babel/code-frame": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", - "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.16.7.tgz", + "integrity": "sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg==", "dev": true, "dependencies": { - "@babel/highlight": "^7.12.13" + "@babel/highlight": "^7.16.7" + }, + "engines": { + "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.12.11", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz", - "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==", - "dev": true + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.16.7.tgz", + "integrity": 
"sha512-hsEnFemeiW4D08A5gUAZxLBTXpZ39P+a+DGDsHw1yxqyQ/jzFEnxf5uTEGp+3bzAbNOxU1paTgYS4ECU/IgfDw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } }, "node_modules/@babel/highlight": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.8.tgz", - "integrity": "sha512-4vrIhfJyfNf+lCtXC2ck1rKSzDwciqF7IWFhXXrSOUC2O5DrVp+w4c6ed4AllTxhTkUP5x2tYj41VaxdVMMRDw==", + "version": "7.16.10", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.10.tgz", + "integrity": "sha512-5FnTQLSLswEj6IkgVw5KusNUUFY9ZGqe/TRFnP/BKYHYgfh7tc+C7mwiy95/yNP7Dh9x580Vv8r7u7ZfTBFxdw==", "dev": true, "dependencies": { - "@babel/helper-validator-identifier": "^7.12.11", + "@babel/helper-validator-identifier": "^7.16.7", "chalk": "^2.0.0", "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "dev": true, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.3.0.tgz", + "integrity": "sha512-1JTKgrOKAHVivSvOYw+sJOunkBjUOvjqWk1DPja7ZFhIS2mX/4EgTT8M7eTK9jrKhL/FvXXEbQwIs3pg1xp3dg==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.1.1", + "espree": "^7.3.0", + "globals": "^12.1.0", + "ignore": "^4.0.6", + "import-fresh": "^3.2.1", + "js-yaml": "^3.13.1", + "lodash": "^4.17.20", + "minimatch": "^3.0.4", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true, + "engines": { + "node": ">= 4" } }, + "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, "node_modules/@nodelib/fs.scandir": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz", - "integrity": "sha512-33g3pMJk3bg5nXbL/+CY6I2eJDzZAni49PfJnL5fghPTggPvBd/pFNSgJsdAgWptuFu7qq/ERvOYFlhvsLTCKA==", + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "dev": true, "dependencies": { - "@nodelib/fs.stat": "2.0.4", + "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" }, 
"engines": { @@ -60,46 +134,81 @@ } }, "node_modules/@nodelib/fs.stat": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.4.tgz", - "integrity": "sha512-IYlHJA0clt2+Vg7bccq+TzRdJvv19c2INqBSsoOLp1je7xjtr7J26+WXR72MCdvU9q1qTzIWDfhMf+DRvQJK4Q==", + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", "dev": true, "engines": { "node": ">= 8" } }, "node_modules/@nodelib/fs.walk": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.6.tgz", - "integrity": "sha512-8Broas6vTtW4GIXTAHDoE32hnN2M5ykgCpWGbuXHQ15vEMqr23pB76e/GZcYsZCHALv50ktd24qhEyKr6wBtow==", + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", "dev": true, "dependencies": { - "@nodelib/fs.scandir": "2.1.4", + "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" }, "engines": { "node": ">= 8" } }, - "node_modules/@trysound/sax": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", - "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "node_modules/@types/eslint": { + "version": "8.4.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.4.1.tgz", + "integrity": "sha512-GE44+DNEyxxh2Kc6ro/VkIj+9ma0pO0bwv9+uHSyBrikYOHr8zYcdPvnBOp1aw8s+CjRvuSx7CyWqRrNFQ59mA==", "dev": true, - "engines": { - "node": ">=10.13.0" + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.3", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.3.tgz", + "integrity": "sha512-PB3ldyrcnAicT35TWPs5IcwKD8S333HMaa2VVv4+wdvebJkjWuW/xESoB8IwRcog8HYVYamb1g/R31Qv5Bx03g==", + "dev": true, + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" } }, + "node_modules/@types/estree": { + "version": "0.0.51", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.51.tgz", + "integrity": "sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ==", + "dev": true + }, + "node_modules/@types/json-schema": { + "version": "7.0.11", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", + "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", + "dev": true + }, + "node_modules/@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=", + "dev": true + }, "node_modules/@types/minimist": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.1.tgz", - "integrity": "sha512-fZQQafSREFyuZcdWFAExYjBiCL7AUCdgsk80iO0q4yihYYdcIiH28CcuPTGFgLOCC8RlW49GSQxdHwZP+I7CNg==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz", + "integrity": "sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==", + "dev": true + }, + "node_modules/@types/node": { + "version": "17.0.23", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.23.tgz", + "integrity": 
"sha512-UxDxWn7dl97rKVeVS61vErvw086aCYhDLyvRQZ5Rk65rZKepaFdm53GeqXaKBuOhED4e9uWq34IC3TdSdJJ2Gw==", "dev": true }, "node_modules/@types/normalize-package-data": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.0.tgz", - "integrity": "sha512-f5j5b/Gf71L+dbqxIpQ4Z2WlmI/mPJ0fOkGGmFgtb6sAu97EPczzbS3/tJKxmcYDj55OX6ssqwDAWOHIYDRDGA==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz", + "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==", "dev": true }, "node_modules/@types/parse-json": { @@ -108,10 +217,234 @@ "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", "dev": true }, + "node_modules/@webassemblyjs/ast": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", + "integrity": "sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw==", + "dev": true, + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz", + "integrity": "sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ==", + "dev": true + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz", + "integrity": "sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg==", + "dev": true + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz", + "integrity": "sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA==", + "dev": true + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz", + "integrity": "sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ==", + "dev": true, + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.11.1", + "@webassemblyjs/helper-api-error": "1.11.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz", + "integrity": "sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q==", + "dev": true + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz", + "integrity": "sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/helper-buffer": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1", + "@webassemblyjs/wasm-gen": "1.11.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + 
"version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz", + "integrity": "sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ==", + "dev": true, + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.1.tgz", + "integrity": "sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw==", + "dev": true, + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.1.tgz", + "integrity": "sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ==", + "dev": true + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz", + "integrity": "sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/helper-buffer": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1", + "@webassemblyjs/helper-wasm-section": "1.11.1", + "@webassemblyjs/wasm-gen": "1.11.1", + "@webassemblyjs/wasm-opt": "1.11.1", + "@webassemblyjs/wasm-parser": "1.11.1", + "@webassemblyjs/wast-printer": "1.11.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz", + "integrity": "sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1", + "@webassemblyjs/ieee754": "1.11.1", + "@webassemblyjs/leb128": "1.11.1", + "@webassemblyjs/utf8": "1.11.1" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz", + "integrity": "sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/helper-buffer": "1.11.1", + "@webassemblyjs/wasm-gen": "1.11.1", + "@webassemblyjs/wasm-parser": "1.11.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz", + "integrity": "sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/helper-api-error": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1", + "@webassemblyjs/ieee754": "1.11.1", + "@webassemblyjs/leb128": "1.11.1", + "@webassemblyjs/utf8": "1.11.1" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz", + "integrity": "sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.11.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webpack-cli/configtest": { + "version": 
"1.1.1", + "resolved": "https://registry.npmjs.org/@webpack-cli/configtest/-/configtest-1.1.1.tgz", + "integrity": "sha512-1FBc1f9G4P/AxMqIgfZgeOTuRnwZMten8E7zap5zgpPInnCrP8D4Q81+4CWIch8i/Nf7nXjP0v6CjjbHOrXhKg==", + "dev": true, + "peerDependencies": { + "webpack": "4.x.x || 5.x.x", + "webpack-cli": "4.x.x" + } + }, + "node_modules/@webpack-cli/info": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/@webpack-cli/info/-/info-1.4.1.tgz", + "integrity": "sha512-PKVGmazEq3oAo46Q63tpMr4HipI3OPfP7LiNOEJg963RMgT0rqheag28NCML0o3GIzA3DmxP1ZIAv9oTX1CUIA==", + "dev": true, + "dependencies": { + "envinfo": "^7.7.3" + }, + "peerDependencies": { + "webpack-cli": "4.x.x" + } + }, + "node_modules/@webpack-cli/serve": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/@webpack-cli/serve/-/serve-1.6.1.tgz", + "integrity": "sha512-gNGTiTrjEVQ0OcVnzsRSqTxaBSr+dmTfm+qJsCDluky8uhdLWep7Gcr62QsAKHTMxjCS/8nEITsmFAhfIx+QSw==", + "dev": true, + "peerDependencies": { + "webpack-cli": "4.x.x" + }, + "peerDependenciesMeta": { + "webpack-dev-server": { + "optional": true + } + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "dev": true + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "dev": true + }, + "node_modules/acorn": { + "version": "8.7.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.7.0.tgz", + "integrity": "sha512-V/LGr1APy+PXIwKebEWrkZPwoeoF+w1jiOBUmuxuiUIaOHtob8Qc9BTrYo7VuI5fR8tqsy+buA2WFooR5olqvQ==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-assertions": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz", + "integrity": "sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw==", + "dev": true, + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, "node_modules/ajv": { - "version": "8.9.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.9.0.tgz", - "integrity": "sha512-qOKJyNj/h+OWx7s5DePL6Zu1KeM9jPZhwBqs+7DzP6bGOvqzVCSf0xueYmVuaC/oQ/VtS2zLMLHdQFbkka+XDQ==", + "version": "8.11.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", + "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", "dev": true, "dependencies": { "fast-deep-equal": "^3.1.1", @@ -124,6 +457,44 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dev": true, + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + 
"peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", @@ -145,17 +516,32 @@ "node": ">=4" } }, - "node_modules/anymatch": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.1.tgz", - "integrity": "sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg==", + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" + "sprintf-js": "~1.0.2" + } + }, + "node_modules/array-includes": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.4.tgz", + "integrity": "sha512-ZTNSQkmWumEbiHO2GF4GmWxYVTiQyJy2XOTa15sdQSrvKn7l+180egQMqlrMOUMCyLMD7pmyQe4mMDUT6Behrw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1", + "get-intrinsic": "^1.1.1", + "is-string": "^1.0.7" }, "engines": { - "node": ">= 8" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/array-union": { @@ -167,6 +553,40 @@ "node": ">=8" } }, + "node_modules/array.prototype.flat": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.5.tgz", + "integrity": "sha512-KaYU+S+ndVqyUnignHftkwc58o3uVU1jzczILJ1tN2YaIZpFIKBiP/x/j97E5MVPsaCloPbqWLB/8qCTVvT2qg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.2.5.tgz", + "integrity": "sha512-08u6rVyi1Lj7oqWbS9nUxliETrtIROT4XGTA4D/LWGten6E3ocm7cy9SIrmNHOL5XVbVuckUp3X6Xyg8/zpvHA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/arrify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", @@ -186,14 +606,24 @@ } }, "node_modules/autoprefixer": { - "version": "10.4.2", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.2.tgz", - "integrity": "sha512-9fOPpHKuDW1w/0EKfRmVnxTDt8166MAnLI3mgZ1JCnhNtYWxcJ6Ud5CO/AVOZi/AvFa8DY9RTy3h3+tFBlrrdQ==", + 
"version": "10.4.4", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.4.tgz", + "integrity": "sha512-Tm8JxsB286VweiZ5F0anmbyGiNI3v3wGv3mz9W+cxEDYB/6jbnj6GM9H9mK3wIL8ftgl+C07Lcwb8PG5PCCPzA==", "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + } + ], "dependencies": { - "browserslist": "^4.19.1", - "caniuse-lite": "^1.0.30001297", - "fraction.js": "^4.1.2", + "browserslist": "^4.20.2", + "caniuse-lite": "^1.0.30001317", + "fraction.js": "^4.2.0", "normalize-range": "^0.1.2", "picocolors": "^1.0.0", "postcss-value-parser": "^4.2.0" @@ -204,10 +634,6 @@ "engines": { "node": "^10 || ^12 || >=14" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, "peerDependencies": { "postcss": "^8.1.0" } @@ -218,21 +644,6 @@ "integrity": "sha512-1ugUSr8BHXRnK23KfuYS+gVMC3LB8QGH9W1iGtDPsNWoQbgtXSExkBu2aDR4epiGWZOjZsj6lDl/N/AqqTC3UA==", "dev": true }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=", - "dev": true - }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", @@ -249,28 +660,26 @@ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, - "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, - "dependencies": { - "fill-range": "^7.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/browserslist": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.19.1.tgz", - "integrity": "sha512-u2tbbG5PdKRTUoctO3NBD8FQ5HdPh1ZXPHzp1rwaa5jTc+RV9/+RlWiAIKmjRPQF+xbGM9Kklj5bZQFa2s/38A==", + "version": "4.20.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.20.2.tgz", + "integrity": "sha512-CQOBCqp/9pDvDbx3xfMi+86pr4KXIf2FDkTTdeuYw8OxS9t898LA1Khq57gtufFILXpfgsSx5woNgsBgvGjpsA==", "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + } + ], "dependencies": { - "caniuse-lite": "^1.0.30001286", - "electron-to-chromium": "^1.4.17", + "caniuse-lite": "^1.0.30001317", + "electron-to-chromium": "^1.4.84", "escalade": "^3.1.1", - "node-releases": "^2.0.1", + "node-releases": "^2.0.2", "picocolors": "^1.0.0" }, "bin": { @@ -278,10 +687,25 @@ }, "engines": { "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true + }, + 
"node_modules/call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dev": true, + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/callsites": { @@ -319,27 +743,21 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/caniuse-api": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", - "dev": true, - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" - } - }, "node_modules/caniuse-lite": { - "version": "1.0.30001309", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001309.tgz", - "integrity": "sha512-Pl8vfigmBXXq+/yUz1jUwULeq9xhMJznzdc/xwl4WclDAuebcTHVefpz8lE/bMI+UN7TOkSSe7B7RnZd6+dzjA==", + "version": "1.0.30001322", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001322.tgz", + "integrity": "sha512-neRmrmIrCGuMnxGSoh+x7zYtQFFgnSY2jaomjU56sCkTA6JINqQrxutF459JpWcWRajvoyn95sOXq4Pqrnyjew==", "dev": true, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - } + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + } + ] }, "node_modules/chalk": { "version": "2.4.2", @@ -355,36 +773,48 @@ "node": ">=4" } }, - "node_modules/chokidar": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", - "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", + "node_modules/chrome-trace-event": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", + "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", + "dev": true, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", "dev": true, "dependencies": { - "anymatch": "~3.1.1", - "braces": "~3.0.2", - "glob-parent": "~5.1.0", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.5.0" + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" }, "engines": { - "node": ">= 8.10.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.1" + "node": ">=6" } }, - "node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "node_modules/clone-deep/node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": 
"sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", "dev": true, "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clone-deep/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true, + "engines": { + "node": ">=0.10.0" } }, "node_modules/clone-regexp": { @@ -420,6 +850,12 @@ "integrity": "sha512-Uqbg+J445nc1TKn4FoDPS6ZZqAvEDnwrH42yo8B40JSOgSLxMZ/gt3h4nmCtPLQeXhjJJkqBx7SCY35WnIixaQ==", "dev": true }, + "node_modules/colorette": { + "version": "2.0.16", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.16.tgz", + "integrity": "sha512-hUewv7oMjCp+wkBv5Rm0v87eJhq4woh5rSR+42YSQJKecCqgIqNkZ6lAlQms/BwHPJA5NKMRlpxPRv0n8HQW6g==", + "dev": true + }, "node_modules/commander": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", @@ -451,19 +887,33 @@ "node": ">=10" } }, - "node_modules/css-declaration-sorter": { - "version": "6.1.4", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.1.4.tgz", - "integrity": "sha512-lpfkqS0fctcmZotJGhnxkIyJWvBXgpyi2wsFd4J8VB7wzyrT6Ch/3Q+FMNJpjK4gu1+GN5khOnpU2ZVKrLbhCw==", + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", "dev": true, "dependencies": { - "timsort": "^0.3.0" + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" }, "engines": { - "node": ">= 10" + "node": ">= 8" + } + }, + "node_modules/cross-spawn/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" }, - "peerDependencies": { - "postcss": "^8.0.9" + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" } }, "node_modules/css-functions-list": { @@ -475,45 +925,30 @@ "node": ">=12.22" } }, - "node_modules/css-select": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.2.1.tgz", - "integrity": "sha512-/aUslKhzkTNCQUB2qTX84lVmfia9NyjP3WpDGtj/WxhwBzWBYUV3DgUpurHTme8UTPcPlAD1DJ+b0nN/t50zDQ==", - "dev": true, - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^5.1.0", - "domhandler": "^4.3.0", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/css-tree": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", - "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "node_modules/css-loader": { + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.7.1.tgz", + "integrity": "sha512-yB5CNFa14MbPJcomwNh3wLThtkZgcNyI2bNMRt8iE5Z8Vwl7f8vQXFAzn2HDOJvtDq2NTZBUGMSUNNyrv3/+cw==", "dev": true, "dependencies": { - "mdn-data": "2.0.14", - "source-map": "^0.6.1" + "icss-utils": "^5.1.0", + "postcss": "^8.4.7", + 
"postcss-modules-extract-imports": "^3.0.0", + "postcss-modules-local-by-default": "^4.0.0", + "postcss-modules-scope": "^3.0.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.3.5" }, "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/css-what": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-5.1.0.tgz", - "integrity": "sha512-arSMRWIIFY0hV8pIxZMEfmMI47Wj3R/aWpZDDxWYCPEiOMv6tfOrnpDtgxBYPEQD4V0Y/958+1TdC3iWTFcUPw==", - "dev": true, - "engines": { - "node": ">= 6" + "node": ">= 12.13.0" }, "funding": { - "url": "https://github.com/sponsors/fb55" + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" } }, "node_modules/cssesc": { @@ -528,98 +963,10 @@ "node": ">=4" } }, - "node_modules/cssnano": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.0.tgz", - "integrity": "sha512-wWxave1wMlThGg4ueK98jFKaNqXnQd1nVZpSkQ9XvR+YymlzP1ofWqES1JkHtI250LksP9z5JH+oDcrKDJezAg==", - "dev": true, - "dependencies": { - "cssnano-preset-default": "^5.2.0", - "lilconfig": "^2.0.3", - "yaml": "^1.10.2" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/cssnano" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/cssnano-preset-default": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.0.tgz", - "integrity": "sha512-3N5Vcptj2pqVKpHVqH6ezOJvqikR2PdLTbTrsrhF61FbLRQuujAqZ2sKN5rvcMsb7hFjrNnjZT8CGEkxoN/Pwg==", - "dev": true, - "dependencies": { - "css-declaration-sorter": "^6.0.3", - "cssnano-utils": "^3.1.0", - "postcss-calc": "^8.2.3", - "postcss-colormin": "^5.3.0", - "postcss-convert-values": "^5.1.0", - "postcss-discard-comments": "^5.1.0", - "postcss-discard-duplicates": "^5.1.0", - "postcss-discard-empty": "^5.1.0", - "postcss-discard-overridden": "^5.1.0", - "postcss-merge-longhand": "^5.1.0", - "postcss-merge-rules": "^5.1.0", - "postcss-minify-font-values": "^5.1.0", - "postcss-minify-gradients": "^5.1.0", - "postcss-minify-params": "^5.1.0", - "postcss-minify-selectors": "^5.2.0", - "postcss-normalize-charset": "^5.1.0", - "postcss-normalize-display-values": "^5.1.0", - "postcss-normalize-positions": "^5.1.0", - "postcss-normalize-repeat-style": "^5.1.0", - "postcss-normalize-string": "^5.1.0", - "postcss-normalize-timing-functions": "^5.1.0", - "postcss-normalize-unicode": "^5.1.0", - "postcss-normalize-url": "^5.1.0", - "postcss-normalize-whitespace": "^5.1.0", - "postcss-ordered-values": "^5.1.0", - "postcss-reduce-initial": "^5.1.0", - "postcss-reduce-transforms": "^5.1.0", - "postcss-svgo": "^5.1.0", - "postcss-unique-selectors": "^5.1.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/cssnano-utils": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz", - "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", - "dev": true, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/csso": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", - "integrity": 
"sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", - "dev": true, - "dependencies": { - "css-tree": "^1.1.2" - }, - "engines": { - "node": ">=8.0.0" - } - }, "node_modules/debug": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", - "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dev": true, "dependencies": { "ms": "2.1.2" @@ -664,13 +1011,22 @@ "node": ">=0.10.0" } }, - "node_modules/dependency-graph": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/dependency-graph/-/dependency-graph-0.11.0.tgz", - "integrity": "sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg==", + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/define-properties": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", + "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", "dev": true, + "dependencies": { + "object-keys": "^1.0.12" + }, "engines": { - "node": ">= 0.6.0" + "node": ">= 0.4" } }, "node_modules/dir-glob": { @@ -685,80 +1041,65 @@ "node": ">=8" } }, - "node_modules/dom-serializer": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.2.tgz", - "integrity": "sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig==", + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", "dev": true, "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" + "esutils": "^2.0.2" }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + "engines": { + "node": ">=6.0.0" } }, - "node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ] + "node_modules/electron-to-chromium": { + "version": "1.4.100", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.100.tgz", + "integrity": "sha512-pNrSE2naf8fizl6/Uxq8UbKb8hU9EiYW4OzCYswosXoLV5NTMOUVKECNzDaHiUubsPq/kAckOzZd7zd8S8CHVw==", + "dev": true }, - "node_modules/domhandler": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.0.tgz", - "integrity": "sha512-fC0aXNQXqKSFTr2wDNZDhsEYjCiYsDWl3D01kwt25hm1YIPyDGHvvi3rw+PLqHAl/m71MaiF7d5zvBr0p5UB2g==", + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": 
"sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/enhanced-resolve": { + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.9.2.tgz", + "integrity": "sha512-GIm3fQfwLJ8YZx2smuHpBKkXC1yOk+OBEmKckVyL0i/ea8mqDEykK3ld5dgH1QYPNyT/lIllxV2LULnxCHaHkA==", "dev": true, "dependencies": { - "domelementtype": "^2.2.0" + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" }, "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" + "node": ">=10.13.0" } }, - "node_modules/domutils": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", - "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "node_modules/enquirer": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz", + "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==", "dev": true, "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" + "ansi-colors": "^4.1.1" }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" + "engines": { + "node": ">=8.6" } }, - "node_modules/electron-to-chromium": { - "version": "1.4.31", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.31.tgz", - "integrity": "sha512-t3XVQtk+Frkv6aTD4RRk0OqosU+VLe1dQFW83MDer78ZD6a52frgXuYOIsLYTQiH2Lm+JB2OKYcn7zrX+YGAiQ==", - "dev": true - }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "node_modules/entities": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", - "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "node_modules/envinfo": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz", + "integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw==", "dev": true, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" + "bin": { + "envinfo": "dist/cli.js" + }, + "engines": { + "node": ">=4" } }, "node_modules/error-ex": { @@ -770,6 +1111,63 @@ "is-arrayish": "^0.2.1" } }, + "node_modules/es-abstract": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.19.2.tgz", + "integrity": "sha512-gfSBJoZdlL2xRiOCy0g8gLMryhoe1TlimjzU99L/31Z8QEGIhVQI+EWwt5lT+AuU9SnorVupXFqqOGqGfsyO6w==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "get-intrinsic": "^1.1.1", + "get-symbol-description": "^1.0.0", + "has": "^1.0.3", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.3", + "is-callable": "^1.2.4", + "is-negative-zero": "^2.0.2", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.1", + "is-string": "^1.0.7", + "is-weakref": "^1.0.2", + "object-inspect": "^1.12.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.2", + "string.prototype.trimend": "^1.0.4", + "string.prototype.trimstart": "^1.0.4", + "unbox-primitive": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" 
+ }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-module-lexer": { + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-0.9.3.tgz", + "integrity": "sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==", + "dev": true + }, + "node_modules/es-to-primitive": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", + "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", + "dev": true, + "dependencies": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/escalade": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", @@ -788,983 +1186,1145 @@ "node": ">=0.8.0" } }, - "node_modules/execall": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/execall/-/execall-2.0.0.tgz", - "integrity": "sha512-0FU2hZ5Hh6iQnarpRtQurM/aAvp3RIbfvgLHrcqJYzhXyV2KFruhuChf9NC6waAhiUR7FFtlugkI4p7f2Fqlow==", + "node_modules/eslint": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.18.0.tgz", + "integrity": "sha512-fbgTiE8BfUJZuBeq2Yi7J3RB3WGUQ9PNuNbmgi6jt9Iv8qrkxfy19Ds3OpL1Pm7zg3BtTVhvcUZbIRQ0wmSjAQ==", "dev": true, "dependencies": { - "clone-regexp": "^2.1.0" + "@babel/code-frame": "^7.0.0", + "@eslint/eslintrc": "^0.3.0", + "ajv": "^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "enquirer": "^2.3.5", + "eslint-scope": "^5.1.1", + "eslint-utils": "^2.1.0", + "eslint-visitor-keys": "^2.0.0", + "espree": "^7.3.1", + "esquery": "^1.2.0", + "esutils": "^2.0.2", + "file-entry-cache": "^6.0.0", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^5.0.0", + "globals": "^12.1.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "js-yaml": "^3.13.1", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash": "^4.17.20", + "minimatch": "^3.0.4", + "natural-compare": "^1.4.0", + "optionator": "^0.9.1", + "progress": "^2.0.0", + "regexpp": "^3.1.0", + "semver": "^7.2.1", + "strip-ansi": "^6.0.0", + "strip-json-comments": "^3.1.0", + "table": "^6.0.4", + "text-table": "^0.2.0", + "v8-compile-cache": "^2.0.3" + }, + "bin": { + "eslint": "bin/eslint.js" }, "engines": { - "node": ">=8" + "node": "^10.12.0 || >=12.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true + "node_modules/eslint-config-standard": { + "version": "16.0.3", + "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-16.0.3.tgz", + "integrity": "sha512-x4fmJL5hGqNJKGHSjnLdgA6U6h1YW/G2dW9fA+cyVur4SK6lyue8+UgNKWlZtUDTXvgKDD/Oa3GQjmB5kjtVvg==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "peerDependencies": { + 
"eslint": "^7.12.1", + "eslint-plugin-import": "^2.22.1", + "eslint-plugin-node": "^11.1.0", + "eslint-plugin-promise": "^4.2.1 || ^5.0.0" + } }, - "node_modules/fast-glob": { - "version": "3.2.11", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.11.tgz", - "integrity": "sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==", + "node_modules/eslint-config-standard-jsx": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-10.0.0.tgz", + "integrity": "sha512-hLeA2f5e06W1xyr/93/QJulN/rLbUVUmqTlexv9PRKHFwEC9ffJcH2LvJhMoEqYQBEYafedgGZXH2W8NUpt5lA==", "dev": true, - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" - }, - "engines": { - "node": ">=8.6.0" + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "peerDependencies": { + "eslint": "^7.12.1", + "eslint-plugin-react": "^7.21.5" } }, - "node_modules/fastest-levenshtein": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.12.tgz", - "integrity": "sha512-On2N+BpYJ15xIC974QNVuYGMOlEVt4s0EOI3wwMqOmK1fdDY+FN/zltPV8vosq4ad4c/gJ1KHScUn/6AWIgiow==", - "dev": true + "node_modules/eslint-import-resolver-node": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.6.tgz", + "integrity": "sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==", + "dev": true, + "dependencies": { + "debug": "^3.2.7", + "resolve": "^1.20.0" + } }, - "node_modules/fastq": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.11.0.tgz", - "integrity": "sha512-7Eczs8gIPDrVzT+EksYBcupqMyxSHXXrHOLRRxU2/DicV8789MRBRR8+Hc2uWzUupOs4YS4JzBmBxjjCVBxD/g==", + "node_modules/eslint-import-resolver-node/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "dev": true, "dependencies": { - "reusify": "^1.0.4" + "ms": "^2.1.1" } }, - "node_modules/file-entry-cache": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", - "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "node_modules/eslint-module-utils": { + "version": "2.7.3", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.3.tgz", + "integrity": "sha512-088JEC7O3lDZM9xGe0RerkOMd0EjFl+Yvd1jPWIkMT5u3H9+HC34mWWPnqPrN13gieT9pBOO+Qt07Nb/6TresQ==", "dev": true, "dependencies": { - "flat-cache": "^3.0.4" + "debug": "^3.2.7", + "find-up": "^2.1.0" }, "engines": { - "node": "^10.12.0 || >=12.0.0" + "node": ">=4" } }, - "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "node_modules/eslint-module-utils/node_modules/debug": { + "version": "3.2.7", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "dev": true, "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" + "ms": "^2.1.1" } }, - "node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "node_modules/eslint-module-utils/node_modules/find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", "dev": true, "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" + "locate-path": "^2.0.0" }, "engines": { - "node": ">=8" + "node": ">=4" } }, - "node_modules/flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "node_modules/eslint-module-utils/node_modules/locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", "dev": true, "dependencies": { - "flatted": "^3.1.0", - "rimraf": "^3.0.2" + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" }, "engines": { - "node": "^10.12.0 || >=12.0.0" + "node": ">=4" } }, - "node_modules/flatted": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.1.1.tgz", - "integrity": "sha512-zAoAQiudy+r5SvnSw3KJy5os/oRJYHzrzja/tBDqrZtNhUw8bt6y8OBzMWcjWr+8liV8Eb6yOhw8WZ7VFZ5ZzA==", - "dev": true - }, - "node_modules/fraction.js": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.1.2.tgz", - "integrity": "sha512-o2RiJQ6DZaR/5+Si0qJUIy637QMRudSi9kU/FFzx9EZazrIdnBgpU+3sEWCxAVhH2RtxW2Oz+T4p2o8uOPVcgA==", + "node_modules/eslint-module-utils/node_modules/p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", "dev": true, - "engines": { - "node": "*" + "dependencies": { + "p-try": "^1.0.0" }, - "funding": { - "type": "patreon", - "url": "https://www.patreon.com/infusion" + "engines": { + "node": ">=4" } }, - "node_modules/fs-extra": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", - "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", + "node_modules/eslint-module-utils/node_modules/p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", "dev": true, "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" + "p-limit": "^1.1.0" }, "engines": { - "node": ">=12" + "node": ">=4" } }, - "node_modules/fs.realpath": { + "node_modules/eslint-module-utils/node_modules/p-try": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true - }, - "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - 
"integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", "dev": true, - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + "node": ">=4" } }, - "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "node_modules/eslint-module-utils/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", "dev": true, "engines": { - "node": "6.* || 8.* || >= 10.*" + "node": ">=4" } }, - "node_modules/get-stdin": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz", - "integrity": "sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==", + "node_modules/eslint-plugin-es": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", + "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", "dev": true, + "dependencies": { + "eslint-utils": "^2.0.0", + "regexpp": "^3.0.0" + }, "engines": { - "node": ">=10" + "node": ">=8.10.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/sponsors/mysticatea" + }, + "peerDependencies": { + "eslint": ">=4.19.1" } }, - "node_modules/glob": { - "version": "7.1.7", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", - "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "node_modules/eslint-plugin-import": { + "version": "2.24.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.24.2.tgz", + "integrity": "sha512-hNVtyhiEtZmpsabL4neEj+6M5DCLgpYyG9nzJY8lZQeQXEn5UPW1DpUdsMHMXsq98dbNm7nt1w9ZMSVpfJdi8Q==", "dev": true, "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", + "array-includes": "^3.1.3", + "array.prototype.flat": "^1.2.4", + "debug": "^2.6.9", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.6", + "eslint-module-utils": "^2.6.2", + "find-up": "^2.0.0", + "has": "^1.0.3", + "is-core-module": "^2.6.0", "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "object.values": "^1.1.4", + "pkg-up": "^2.0.0", + "read-pkg-up": "^3.0.0", + "resolve": "^1.20.0", + "tsconfig-paths": "^3.11.0" }, "engines": { - "node": "*" + "node": ">=4" }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "peerDependencies": { + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0" } }, - "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": 
"sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "node_modules/eslint-plugin-import/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "dev": true, "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" + "ms": "2.0.0" } }, - "node_modules/global-modules": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", - "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "node_modules/eslint-plugin-import/node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", "dev": true, "dependencies": { - "global-prefix": "^3.0.0" + "esutils": "^2.0.2" }, "engines": { - "node": ">=6" + "node": ">=0.10.0" } }, - "node_modules/global-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", - "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "node_modules/eslint-plugin-import/node_modules/find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", "dev": true, "dependencies": { - "ini": "^1.3.5", - "kind-of": "^6.0.2", - "which": "^1.3.1" + "locate-path": "^2.0.0" }, "engines": { - "node": ">=6" + "node": ">=4" } }, - "node_modules/globby": { - "version": "12.0.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-12.0.2.tgz", - "integrity": "sha512-lAsmb/5Lww4r7MM9nCCliDZVIKbZTavrsunAsHLr9oHthrZP1qi7/gAnHOsUs9bLvEt2vKVJhHmxuL7QbDuPdQ==", + "node_modules/eslint-plugin-import/node_modules/hosted-git-info": { + "version": "2.8.9", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true + }, + "node_modules/eslint-plugin-import/node_modules/load-json-file": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", + "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=", "dev": true, "dependencies": { - "array-union": "^3.0.1", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.7", - "ignore": "^5.1.8", - "merge2": "^1.4.1", - "slash": "^4.0.0" + "graceful-fs": "^4.1.2", + "parse-json": "^4.0.0", + "pify": "^3.0.0", + "strip-bom": "^3.0.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4" } }, - "node_modules/globby/node_modules/array-union": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-3.0.1.tgz", - "integrity": "sha512-1OvF9IbWwaeiM9VhzYXVQacMibxpXOMYVNIvMtKRyX9SImBXpKcFr8XvFDeEslCyuH/t6KRt7HEO94AlP8Iatw==", + "node_modules/eslint-plugin-import/node_modules/locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", "dev": true, - "engines": { - "node": ">=12" + "dependencies": { + "p-locate": "^2.0.0", + 
"path-exists": "^3.0.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/globby/node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", - "dev": true, "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4" } }, - "node_modules/globjoin": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/globjoin/-/globjoin-0.1.4.tgz", - "integrity": "sha1-L0SUrIkZ43Z8XLtpHp9GMyQoXUM=", + "node_modules/eslint-plugin-import/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", "dev": true }, - "node_modules/graceful-fs": { - "version": "4.2.8", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.8.tgz", - "integrity": "sha512-qkIilPUYcNhJpd33n0GBXTB1MMPp14TxEsEs0pTrsSVucApsYzW5V+Q8Qxhik6KU3evy+qkAAowTByymK0avdg==", - "dev": true + "node_modules/eslint-plugin-import/node_modules/normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } }, - "node_modules/hard-rejection": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz", - "integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==", + "node_modules/eslint-plugin-import/node_modules/p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", "dev": true, + "dependencies": { + "p-try": "^1.0.0" + }, "engines": { - "node": ">=6" + "node": ">=4" } }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "node_modules/eslint-plugin-import/node_modules/p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", "dev": true, "dependencies": { - "function-bind": "^1.1.1" + "p-limit": "^1.1.0" }, "engines": { - "node": ">= 0.4.0" + "node": ">=4" } }, - "node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "node_modules/eslint-plugin-import/node_modules/p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", "dev": true, "engines": { "node": ">=4" } }, - "node_modules/hosted-git-info": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.0.2.tgz", - "integrity": "sha512-c9OGXbZ3guC/xOlCg1Ci/VgWlwsqDv1yMQL1CWqXDL0hDjXuNcq0zuR4xqPSuasI3kqFDhqSyTjREz5gzq0fXg==", + 
"node_modules/eslint-plugin-import/node_modules/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", "dev": true, "dependencies": { - "lru-cache": "^6.0.0" + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" }, "engines": { - "node": ">=10" + "node": ">=4" } }, - "node_modules/html-tags": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz", - "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==", + "node_modules/eslint-plugin-import/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", "dev": true, "engines": { - "node": ">=8" + "node": ">=4" } }, - "node_modules/ignore": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz", - "integrity": "sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==", + "node_modules/eslint-plugin-import/node_modules/path-type": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", + "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", "dev": true, + "dependencies": { + "pify": "^3.0.0" + }, "engines": { - "node": ">= 4" + "node": ">=4" } }, - "node_modules/immutable": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.0.0.tgz", - "integrity": "sha512-zIE9hX70qew5qTUjSS7wi1iwj/l7+m54KWU247nhM3v806UdGj1yDndXj+IOYxxtW9zyLI+xqFNZjTuDaLUqFw==", - "dev": true - }, - "node_modules/import-cwd": { + "node_modules/eslint-plugin-import/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/import-cwd/-/import-cwd-3.0.0.tgz", - "integrity": "sha512-4pnzH16plW+hgvRECbDWpQl3cqtvSofHWh44met7ESfZ8UZOWWddm8hEyDTqREJ9RbYHY8gi8DqmaelApoOGMg==", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", "dev": true, - "dependencies": { - "import-from": "^3.0.0" - }, "engines": { - "node": ">=8" + "node": ">=4" } }, - "node_modules/import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "node_modules/eslint-plugin-import/node_modules/read-pkg": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", + "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=", "dev": true, "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" + "load-json-file": "^4.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^3.0.0" }, "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4" } }, - "node_modules/import-from": { + "node_modules/eslint-plugin-import/node_modules/read-pkg-up": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/import-from/-/import-from-3.0.0.tgz", - "integrity": "sha512-CiuXOFFSzkU5x/CR0+z7T91Iht4CXgfCxVOFRhh2Zyhg5wOpWvvDLQUsWl+gcN+QscYBjez8hDCt85O7RLDttQ==", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-3.0.0.tgz", + "integrity": "sha1-PtSWaF26D4/hGNBpHcUfSh/5bwc=", "dev": true, 
"dependencies": { - "resolve-from": "^5.0.0" + "find-up": "^2.0.0", + "read-pkg": "^3.0.0" }, "engines": { - "node": ">=8" + "node": ">=4" } }, - "node_modules/import-from/node_modules/resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "node_modules/eslint-plugin-import/node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", "dev": true, - "engines": { - "node": ">=8" + "bin": { + "semver": "bin/semver" } }, - "node_modules/import-lazy": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", - "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "node_modules/eslint-plugin-node": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", + "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", "dev": true, + "dependencies": { + "eslint-plugin-es": "^3.0.0", + "eslint-utils": "^2.0.0", + "ignore": "^5.1.1", + "minimatch": "^3.0.4", + "resolve": "^1.10.1", + "semver": "^6.1.0" + }, "engines": { - "node": ">=8" + "node": ">=8.10.0" + }, + "peerDependencies": { + "eslint": ">=5.16.0" } }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", + "node_modules/eslint-plugin-node/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", "dev": true, - "engines": { - "node": ">=0.8.19" + "bin": { + "semver": "bin/semver.js" } }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "node_modules/eslint-plugin-promise": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-5.1.1.tgz", + "integrity": "sha512-XgdcdyNzHfmlQyweOPTxmc7pIsS6dE4MvwhXWMQ2Dxs1XAL2GJDilUsjWen6TWik0aSI+zD/PqocZBblcm9rdA==", "dev": true, "engines": { - "node": ">=8" + "node": "^10.12.0 || >=12.0.0" + }, + "peerDependencies": { + "eslint": "^7.0.0" } }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "node_modules/eslint-plugin-react": { + "version": "7.25.3", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.25.3.tgz", + "integrity": "sha512-ZMbFvZ1WAYSZKY662MBVEWR45VaBT6KSJCiupjrNlcdakB90juaZeDCbJq19e73JZQubqFtgETohwgAt8u5P6w==", "dev": true, "dependencies": { - "once": "^1.3.0", - "wrappy": "1" + "array-includes": "^3.1.3", + "array.prototype.flatmap": "^1.2.4", + "doctrine": "^2.1.0", + "estraverse": "^5.2.0", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.0.4", + "object.entries": "^1.1.4", + "object.fromentries": 
"^2.0.4", + "object.hasown": "^1.0.0", + "object.values": "^1.1.4", + "prop-types": "^15.7.2", + "resolve": "^2.0.0-next.3", + "string.prototype.matchall": "^4.0.5" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7" } }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true + "node_modules/eslint-plugin-react/node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true + "node_modules/eslint-plugin-react/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", - "dev": true + "node_modules/eslint-plugin-react/node_modules/resolve": { + "version": "2.0.0-next.3", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.3.tgz", + "integrity": "sha512-W8LucSynKUIDu9ylraa7ueVZ7hc0uAgJBxVsQSKOXOyle8a93qXhcz+XAXZ8bIq2d6i4Ehddn6Evt+0/UwKk6Q==", + "dev": true, + "dependencies": { + "is-core-module": "^2.2.0", + "path-parse": "^1.0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", "dev": true, "dependencies": { - "binary-extensions": "^2.0.0" + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" }, "engines": { - "node": ">=8" + "node": ">=8.0.0" } }, - "node_modules/is-core-module": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.4.0.tgz", - "integrity": "sha512-6A2fkfq1rfeQZjxrZJGerpLCTHRNEBiSgnu0+obeJpEPZRUooHgsizvzv0ZjJwOz3iWIHdJtVWJ/tmPr3D21/A==", + "node_modules/eslint-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz", + "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", "dev": true, "dependencies": { - "has": "^1.0.3" + "eslint-visitor-keys": "^1.1.0" + }, + "engines": { + "node": ">=6" }, "funding": { - "url": "https://github.com/sponsors/ljharb" + "url": "https://github.com/sponsors/mysticatea" } }, - 
"node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "node_modules/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", "dev": true, "engines": { - "node": ">=0.10.0" + "node": ">=4" } }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "node_modules/eslint-visitor-keys": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", + "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", "dev": true, "engines": { - "node": ">=8" + "node": ">=10" } }, - "node_modules/is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "node_modules/eslint/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "dependencies": { - "is-extglob": "^2.1.1" + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" }, - "engines": { - "node": ">=0.10.0" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "node_modules/eslint/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, "engines": { - "node": ">=0.12.0" + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/is-plain-object": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", - "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "node_modules/eslint/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, "engines": { - "node": ">=0.10.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "node_modules/is-regexp": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-2.1.0.tgz", - "integrity": 
"sha512-OZ4IlER3zmRIoB9AqNhEggVxqIH4ofDns5nRrPS6yQxXE1TPCUpFznBfRQmQa8uC+pXqjMnukiJBxCisIxiLGA==", + "node_modules/eslint/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, "engines": { - "node": ">=6" + "node": ">=7.0.0" } }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "node_modules/eslint/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", "dev": true }, - "node_modules/js-tokens": { + "node_modules/eslint/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } }, - "node_modules/json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "dev": true + "node_modules/eslint/node_modules/ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true, + "engines": { + "node": ">= 4" + } }, - "node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, - "node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "node_modules/eslint/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, "dependencies": { - "universalify": "^2.0.0" + "has-flag": "^4.0.0" }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" + "engines": { + "node": ">=8" } }, - "node_modules/kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": 
"sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "node_modules/espree": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz", + "integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==", "dev": true, + "dependencies": { + "acorn": "^7.4.0", + "acorn-jsx": "^5.3.1", + "eslint-visitor-keys": "^1.3.0" + }, "engines": { - "node": ">=0.10.0" + "node": "^10.12.0 || >=12.0.0" } }, - "node_modules/known-css-properties": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/known-css-properties/-/known-css-properties-0.24.0.tgz", - "integrity": "sha512-RTSoaUAfLvpR357vWzAz/50Q/BmHfmE6ETSWfutT0AJiw10e6CmcdYRQJlLRd95B53D0Y2aD1jSxD3V3ySF+PA==", - "dev": true + "node_modules/espree/node_modules/acorn": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } }, - "node_modules/lilconfig": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.0.3.tgz", - "integrity": "sha512-EHKqr/+ZvdKCifpNrJCKxBTgk5XupZA3y/aCPY9mxfgBzmgh93Mt/WqjjQ38oMxXuvDokaKiM3lAgvSH2sjtHg==", + "node_modules/espree/node_modules/eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", "dev": true, "engines": { - "node": ">=10" + "node": ">=4" } }, - "node_modules/lines-and-columns": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", - "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", - "dev": true + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } }, - "node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "node_modules/esquery": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz", + "integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==", "dev": true, "dependencies": { - "p-locate": "^4.1.0" + "estraverse": "^5.1.0" }, "engines": { - "node": ">=8" + "node": ">=0.10" } }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, - "node_modules/lodash.difference": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.difference/-/lodash.difference-4.5.0.tgz", - "integrity": "sha1-nMtOUF1Ia5FlE0V3KIWi3yf9AXw=", - "dev": true - }, - "node_modules/lodash.forown": { - "version": "4.4.0", - "resolved": 
"https://registry.npmjs.org/lodash.forown/-/lodash.forown-4.4.0.tgz", - "integrity": "sha1-hRFc8E9z75ZuztUlEdOJPMRmg68=", - "dev": true - }, - "node_modules/lodash.get": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=", - "dev": true - }, - "node_modules/lodash.groupby": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.groupby/-/lodash.groupby-4.6.0.tgz", - "integrity": "sha1-Cwih3PaDl8OXhVwyOXg4Mt90A9E=", - "dev": true - }, - "node_modules/lodash.memoize": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=", - "dev": true - }, - "node_modules/lodash.sortby": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", - "integrity": "sha1-7dFMgk4sycHgsKG0K7UhBRakJDg=", - "dev": true - }, - "node_modules/lodash.truncate": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", - "integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=", - "dev": true - }, - "node_modules/lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=", - "dev": true + "node_modules/esquery/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, "dependencies": { - "yallist": "^4.0.0" + "estraverse": "^5.2.0" }, "engines": { - "node": ">=10" + "node": ">=4.0" } }, - "node_modules/map-obj": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.2.1.tgz", - "integrity": "sha512-+WA2/1sPmDj1dlvvJmB5G6JKfY9dpn7EVBUL06+y6PoljPkh+6V1QihwxNkbcGxCRjt2b0F9K0taiCuo7MbdFQ==", + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", "dev": true, "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4.0" } }, - "node_modules/mathml-tag-names": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/mathml-tag-names/-/mathml-tag-names-2.1.3.tgz", - "integrity": "sha512-APMBEanjybaPzUrfqU0IMU5I0AswKMH7k8OTLs0vvV4KZpExkTkY87nR/zpbuTPj+gARop7aGUbl11pnDfW6xg==", + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", "dev": true, - "funding": { - "type": "github", - "url": 
"https://github.com/sponsors/wooorm" + "engines": { + "node": ">=4.0" } }, - "node_modules/mdn-data": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", - "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==", - "dev": true - }, - "node_modules/meow": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/meow/-/meow-9.0.0.tgz", - "integrity": "sha512-+obSblOQmRhcyBt62furQqRAQpNyWXo8BuQ5bN7dG8wmwQ+vwHKp/rCFD4CrTP8CsDQD1sjoZ94K417XEUk8IQ==", + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "dev": true, + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", "dev": true, "dependencies": { - "@types/minimist": "^1.2.0", - "camelcase-keys": "^6.2.2", - "decamelize": "^1.2.0", - "decamelize-keys": "^1.1.0", - "hard-rejection": "^2.1.0", - "minimist-options": "4.1.0", - "normalize-package-data": "^3.0.0", - "read-pkg-up": "^7.0.1", - "redent": "^3.0.0", - "trim-newlines": "^3.0.0", - "type-fest": "^0.18.0", - "yargs-parser": "^20.2.3" + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" }, "engines": { "node": ">=10" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/sindresorhus/execa?sponsor=1" } }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "node_modules/execall": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/execall/-/execall-2.0.0.tgz", + "integrity": "sha512-0FU2hZ5Hh6iQnarpRtQurM/aAvp3RIbfvgLHrcqJYzhXyV2KFruhuChf9NC6waAhiUR7FFtlugkI4p7f2Fqlow==", "dev": true, + "dependencies": { + "clone-regexp": "^2.1.0" + }, "engines": { - "node": ">= 8" + "node": ">=8" } }, - "node_modules/micromatch": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz", - "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==", + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/fast-glob": { + "version": "3.2.11", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.11.tgz", + "integrity": "sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==", "dev": true, "dependencies": { 
- "braces": "^3.0.1", - "picomatch": "^2.2.3" + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" }, "engines": { - "node": ">=8.6" + "node": ">=8.6.0" } }, - "node_modules/min-indent": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", - "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "node_modules/fast-glob/node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, "engines": { - "node": ">=4" + "node": ">=8" } }, - "node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "node_modules/fast-glob/node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", "dev": true, "dependencies": { - "brace-expansion": "^1.1.7" + "to-regex-range": "^5.0.1" }, "engines": { - "node": "*" + "node": ">=8" } }, - "node_modules/minimist-options": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz", - "integrity": "sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==", + "node_modules/fast-glob/node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true, - "dependencies": { - "arrify": "^1.0.1", - "is-plain-obj": "^1.1.0", - "kind-of": "^6.0.3" - }, "engines": { - "node": ">= 6" + "node": ">=0.12.0" } }, - "node_modules/minimist-options/node_modules/is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", + "node_modules/fast-glob/node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, "engines": { - "node": ">=0.10.0" + "node": ">=8.6" } }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/nanoid": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz", - "integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==", + "node_modules/fast-glob/node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": 
"sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, - "bin": { - "nanoid": "bin/nanoid.cjs" + "dependencies": { + "is-number": "^7.0.0" }, "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + "node": ">=8.0" } }, - "node_modules/node-releases": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.1.tgz", - "integrity": "sha512-CqyzN6z7Q6aMeF/ktcMVTzhAHCEpf8SOarwpzpf8pNBY2k5/oM34UHldUwp8VKI7uxct2HxSRdJjBaZeESzcxA==", + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", "dev": true }, - "node_modules/normalize-package-data": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.2.tgz", - "integrity": "sha512-6CdZocmfGaKnIHPVFhJJZ3GuR8SsLKvDANFp47Jmy51aKIr8akjAWTSxtpI+MBgBFdSMRyo4hMpDlT6dTffgZg==", + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true + }, + "node_modules/fastest-levenshtein": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.12.tgz", + "integrity": "sha512-On2N+BpYJ15xIC974QNVuYGMOlEVt4s0EOI3wwMqOmK1fdDY+FN/zltPV8vosq4ad4c/gJ1KHScUn/6AWIgiow==", + "dev": true + }, + "node_modules/fastq": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz", + "integrity": "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==", "dev": true, "dependencies": { - "hosted-git-info": "^4.0.1", - "resolve": "^1.20.0", - "semver": "^7.3.4", - "validate-npm-package-license": "^3.0.1" - }, - "engines": { - "node": ">=10" + "reusify": "^1.0.4" } }, - "node_modules/normalize-package-data/node_modules/semver": { - "version": "7.3.5", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", - "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", "dev": true, "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" + "flat-cache": "^3.0.4" }, "engines": { - "node": ">=10" + "node": "^10.12.0 || >=12.0.0" } }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, "engines": { - "node": ">=0.10.0" + "node": ">=8" } }, - "node_modules/normalize-range": { - "version": "0.1.2", - "resolved": 
"https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=", + "node_modules/flat-cache": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", + "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", "dev": true, + "dependencies": { + "flatted": "^3.1.0", + "rimraf": "^3.0.2" + }, "engines": { - "node": ">=0.10.0" + "node": "^10.12.0 || >=12.0.0" } }, - "node_modules/normalize-selector": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/normalize-selector/-/normalize-selector-0.2.0.tgz", - "integrity": "sha1-0LFF62kRicY6eNIB3E/bEpPvDAM=", + "node_modules/flatted": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.5.tgz", + "integrity": "sha512-WIWGi2L3DyTUvUrwRKgGi9TwxQMUEqPOPQBVi71R96jZXJdFskXEmf54BoZaS1kknGODoIGASGEzBUYdyMCBJg==", "dev": true }, - "node_modules/normalize-url": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", - "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", + "node_modules/fraction.js": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz", + "integrity": "sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==", "dev": true, "engines": { - "node": ">=10" + "node": "*" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "patreon", + "url": "https://www.patreon.com/infusion" } }, - "node_modules/nth-check": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.1.tgz", - "integrity": "sha512-it1vE95zF6dTT9lBsYbxvqh0Soy4SPowchj0UBGj/V6cTPnXXtQOPUbhZ6CmGzAD/rW22LQK6E96pcdJXk4A4w==", + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "node_modules/function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "node_modules/functional-red-black-tree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", + "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", + "dev": true + }, + "node_modules/get-intrinsic": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", + "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", "dev": true, "dependencies": { - "boolbase": "^1.0.0" + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1" }, "funding": { - "url": "https://github.com/fb55/nth-check?sponsor=1" + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "node_modules/get-stdin": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz", + "integrity": 
"sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==", "dev": true, - "dependencies": { - "wrappy": "1" + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", "dev": true, - "dependencies": { - "p-try": "^2.0.0" - }, "engines": { - "node": ">=6" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "node_modules/get-symbol-description": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", + "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", "dev": true, "dependencies": { - "p-limit": "^2.2.0" + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" }, "engines": { - "node": ">=8" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "node_modules/glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, "engines": { - "node": ">=6" + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, "dependencies": { - "callsites": "^3.0.0" + "is-glob": "^4.0.1" }, "engines": { - "node": ">=6" + "node": ">= 6" } }, - "node_modules/parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", + 
"dev": true + }, + "node_modules/globals": { + "version": "12.4.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", + "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" + "type-fest": "^0.8.1" }, "engines": { "node": ">=8" @@ -1773,2121 +2333,4817 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "node_modules/globals/node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", "dev": true, "engines": { "node": ">=8" } }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, "engines": { - "node": ">=0.10.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "node_modules/globjoin": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/globjoin/-/globjoin-0.1.4.tgz", + "integrity": "sha1-L0SUrIkZ43Z8XLtpHp9GMyQoXUM=", "dev": true }, - "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "node_modules/graceful-fs": { + "version": "4.2.9", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", + "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==", "dev": true }, - "node_modules/picomatch": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", - "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", - "dev": true, - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pify": { - "version": "2.3.0", - "resolved": 
"https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", + "node_modules/hard-rejection": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz", + "integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==", "dev": true, "engines": { - "node": ">=0.10.0" + "node": ">=6" } }, - "node_modules/postcss": { - "version": "8.4.7", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.7.tgz", - "integrity": "sha512-L9Ye3r6hkkCeOETQX6iOaWZgjp3LL6Lpqm6EtgbKrgqGGteRMNb9vzBfRL96YOSu8o7x3MfIH9Mo5cPJFGrW6A==", + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", "dev": true, "dependencies": { - "nanoid": "^3.3.1", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" + "function-bind": "^1.1.1" }, "engines": { - "node": "^10 || ^12 || >=14" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - } - }, - "node_modules/postcss-calc": { - "version": "8.2.4", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz", - "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.9", - "postcss-value-parser": "^4.2.0" - }, - "peerDependencies": { - "postcss": "^8.2.2" + "node": ">= 0.4.0" } }, - "node_modules/postcss-cli": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/postcss-cli/-/postcss-cli-9.1.0.tgz", - "integrity": "sha512-zvDN2ADbWfza42sAnj+O2uUWyL0eRL1V+6giM2vi4SqTR3gTYy8XzcpfwccayF2szcUif0HMmXiEaDv9iEhcpw==", + "node_modules/has-bigints": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", + "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==", "dev": true, - "dependencies": { - "chokidar": "^3.3.0", - "dependency-graph": "^0.11.0", - "fs-extra": "^10.0.0", - "get-stdin": "^9.0.0", - "globby": "^12.0.0", - "picocolors": "^1.0.0", - "postcss-load-config": "^3.0.0", - "postcss-reporter": "^7.0.0", - "pretty-hrtime": "^1.0.3", - "read-cache": "^1.0.0", - "slash": "^4.0.0", - "yargs": "^17.0.0" - }, - "bin": { - "postcss": "index.js" - }, - "engines": { - "node": ">=12" - }, - "peerDependencies": { - "postcss": "^8.0.0" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-cli/node_modules/get-stdin": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-9.0.0.tgz", - "integrity": "sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA==", + "node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", "dev": true, "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4" } }, - "node_modules/postcss-cli/node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "node_modules/has-symbols": { + "version": "1.0.3", + 
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", "dev": true, "engines": { - "node": ">=12" + "node": ">= 0.4" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-colormin": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.0.tgz", - "integrity": "sha512-WdDO4gOFG2Z8n4P8TWBpshnL3JpmNmJwdnfP2gbk2qBA8PWwOYcmjmI/t3CmMeL72a7Hkd+x/Mg9O2/0rD54Pg==", + "node_modules/has-tostringtag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", + "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", "dev": true, "dependencies": { - "browserslist": "^4.16.6", - "caniuse-api": "^3.0.0", - "colord": "^2.9.1", - "postcss-value-parser": "^4.2.0" + "has-symbols": "^1.0.2" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">= 0.4" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-convert-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.0.tgz", - "integrity": "sha512-GkyPbZEYJiWtQB0KZ0X6qusqFHUepguBCNFi9t5JJc7I2OTXG7C0twbTLvCfaKOLl3rSXmpAwV7W5txd91V84g==", + "node_modules/hosted-git-info": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", + "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==", "dev": true, "dependencies": { - "postcss-value-parser": "^4.2.0" + "lru-cache": "^6.0.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=10" } }, - "node_modules/postcss-discard-comments": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.0.tgz", - "integrity": "sha512-L0IKF4jAshRyn03SkEO6ar/Ipz2oLywVbg2THf2EqqdNkBwmVMxuTR/RoAltOw4piiaLt3gCAdrbAqmTBInmhg==", + "node_modules/html-tags": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz", + "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==", "dev": true, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=8" } }, - "node_modules/postcss-discard-duplicates": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", - "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", "dev": true, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=10.17.0" } }, - "node_modules/postcss-discard-empty": { + "node_modules/icss-utils": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.0.tgz", - 
"integrity": "sha512-782T/buGgb3HOuHOJAHpdyKzAAKsv/BxWqsutnZ+QsiHEcDkY7v+6WWdturuBiSal6XMOO1p1aJvwXdqLD5vhA==", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", "dev": true, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^10 || ^12 || >= 14" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.1.0" } }, - "node_modules/postcss-discard-overridden": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", - "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", + "node_modules/ignore": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz", + "integrity": "sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==", "dev": true, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">= 4" } }, - "node_modules/postcss-load-config": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-3.1.0.tgz", - "integrity": "sha512-ipM8Ds01ZUophjDTQYSVP70slFSYg3T0/zyfII5vzhN6V57YSxMgG5syXuwi5VtS8wSf3iL30v0uBdoIVx4Q0g==", + "node_modules/immutable": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.0.0.tgz", + "integrity": "sha512-zIE9hX70qew5qTUjSS7wi1iwj/l7+m54KWU247nhM3v806UdGj1yDndXj+IOYxxtW9zyLI+xqFNZjTuDaLUqFw==", + "dev": true + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", "dev": true, "dependencies": { - "import-cwd": "^3.0.0", - "lilconfig": "^2.0.3", - "yaml": "^1.10.2" + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" }, "engines": { - "node": ">= 10" + "node": ">=6" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "ts-node": ">=9.0.0" - }, - "peerDependenciesMeta": { - "ts-node": { - "optional": true - } + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/postcss-media-query-parser": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/postcss-media-query-parser/-/postcss-media-query-parser-0.2.3.tgz", - "integrity": "sha1-J7Ocb02U+Bsac7j3Y1HGCeXO8kQ=", - "dev": true - }, - "node_modules/postcss-merge-longhand": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.0.tgz", - "integrity": "sha512-Gr46srN2tsLD8fudKYoHO56RG0BLQ2nsBRnSZGY04eNBPwTeWa9KeHrbL3tOLAHyB2aliikycPH2TMJG1U+W6g==", + "node_modules/import-fresh/node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0", - "stylehacks": "^5.1.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=4" } }, - "node_modules/postcss-merge-rules": { - "version": "5.1.0", - "resolved": 
"https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.0.tgz", - "integrity": "sha512-NecukEJovQ0mG7h7xV8wbYAkXGTO3MPKnXvuiXzOKcxoOodfTTKYjeo8TMhAswlSkjcPIBlnKbSFcTuVSDaPyQ==", + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", "dev": true, - "dependencies": { - "browserslist": "^4.16.6", - "caniuse-api": "^3.0.0", - "cssnano-utils": "^3.1.0", - "postcss-selector-parser": "^6.0.5" - }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=8" } }, - "node_modules/postcss-minify-font-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", - "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", + "node_modules/import-local": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", + "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", "dev": true, "dependencies": { - "postcss-value-parser": "^4.2.0" + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">=8" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/postcss-minify-gradients": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.0.tgz", - "integrity": "sha512-J/TMLklkONn3LuL8wCwfwU8zKC1hpS6VcxFkNUNjmVt53uKqrrykR3ov11mdUYyqVMEx67slMce0tE14cE4DTg==", + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", "dev": true, - "dependencies": { - "colord": "^2.9.1", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=0.8.19" } }, - "node_modules/postcss-minify-params": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.0.tgz", - "integrity": "sha512-q67dcts4Hct6x8+JmhBgctHkbvUsqGIg2IItenjE63iZXMbhjr7AlVZkNnKtIGt/1Wsv7p/7YzeSII6Q+KPXRg==", + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", "dev": true, - "dependencies": { - "browserslist": "^4.16.6", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=8" } }, - "node_modules/postcss-minify-selectors": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.0.tgz", - "integrity": "sha512-vYxvHkW+iULstA+ctVNx0VoRAR4THQQRkG77o0oa4/mBS0OzGvvzLIvHDv/nNEM0crzN2WIyFU5X7wZhaUK3RA==", + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + 
"integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", "dev": true, "dependencies": { - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "once": "^1.3.0", + "wrappy": "1" } }, - "node_modules/postcss-nested": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-5.0.6.tgz", - "integrity": "sha512-rKqm2Fk0KbA8Vt3AdGN0FB9OBOMDVajMG6ZCf/GoHgdxUJ4sBFp0A/uMIRm+MJUdo33YXEtjqIz8u7DAp8B7DA==", + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true + }, + "node_modules/internal-slot": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz", + "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==", "dev": true, "dependencies": { - "postcss-selector-parser": "^6.0.6" + "get-intrinsic": "^1.1.0", + "has": "^1.0.3", + "side-channel": "^1.0.4" }, "engines": { - "node": ">=12.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.2.14" + "node": ">= 0.4" } }, - "node_modules/postcss-normalize-charset": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", - "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", + "dev": true + }, + "node_modules/is-bigint": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", + "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", "dev": true, - "engines": { - "node": "^10 || ^12 || >=14.0" + "dependencies": { + "has-bigints": "^1.0.1" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-normalize-display-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", - "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", + "node_modules/is-boolean-object": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", + "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", "dev": true, "dependencies": { - "postcss-value-parser": "^4.2.0" + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">= 0.4" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-normalize-positions": { - "version": "5.1.0", - "resolved": 
"https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.0.tgz", - "integrity": "sha512-8gmItgA4H5xiUxgN/3TVvXRoJxkAWLW6f/KKhdsH03atg0cB8ilXnrB5PpSshwVu/dD2ZsRFQcR1OEmSBDAgcQ==", + "node_modules/is-callable": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.4.tgz", + "integrity": "sha512-nsuwtxZfMX67Oryl9LCQ+upnC0Z0BgpwntpS89m1H/TLF0zNfzfLMV/9Wa/6MZsj0acpEjAO0KF1xT6ZdLl95w==", "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">= 0.4" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-normalize-repeat-style": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.0.tgz", - "integrity": "sha512-IR3uBjc+7mcWGL6CtniKNQ4Rr5fTxwkaDHwMBDGGs1x9IVRkYIT/M4NelZWkAOBdV6v3Z9S46zqaKGlyzHSchw==", + "node_modules/is-core-module": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.8.1.tgz", + "integrity": "sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA==", "dev": true, "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" + "has": "^1.0.3" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-normalize-string": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", - "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", + "node_modules/is-date-object": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", + "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", "dev": true, "dependencies": { - "postcss-value-parser": "^4.2.0" + "has-tostringtag": "^1.0.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">= 0.4" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-normalize-timing-functions": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", - "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=0.10.0" } }, - "node_modules/postcss-normalize-unicode": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.0.tgz", - "integrity": "sha512-J6M3MizAAZ2dOdSjy2caayJLQT8E8K9XjLce8AUQMwOrCvjCHv24aLC/Lps1R1ylOfol5VIDMaM/Lo9NGlk1SQ==", + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": 
"sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", "dev": true, - "dependencies": { - "browserslist": "^4.16.6", - "postcss-value-parser": "^4.2.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=8" } }, - "node_modules/postcss-normalize-url": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", - "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, "dependencies": { - "normalize-url": "^6.0.1", - "postcss-value-parser": "^4.2.0" + "is-extglob": "^2.1.1" }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=0.10.0" } }, - "node_modules/postcss-normalize-whitespace": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.0.tgz", - "integrity": "sha512-7O1FanKaJkpWFyCghFzIkLhehujV/frGkdofGLwhg5upbLyGsSfiTcZAdSzoPsSUgyPCkBkNMeWR8yVgPdQybg==", + "node_modules/is-negative-zero": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", + "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==", "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">= 0.4" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-ordered-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.0.tgz", - "integrity": "sha512-wU4Z4D4uOIH+BUKkYid36gGDJNQtkVJT7Twv8qH6UyfttbbJWyw4/xIPuVEkkCtQLAJ0EdsNSh8dlvqkXb49TA==", + "node_modules/is-number-object": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.6.tgz", + "integrity": "sha512-bEVOqiRcvo3zO1+G2lVMy+gkkEm9Yh7cDMRusKKu5ZJKPUYSJwICTKZrNKHA2EbSP0Tu0+6B/emsYNHZyn6K8g==", "dev": true, "dependencies": { - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" + "has-tostringtag": "^1.0.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">= 0.4" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-reduce-initial": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.0.tgz", - "integrity": "sha512-5OgTUviz0aeH6MtBjHfbr57tml13PuedK/Ecg8szzd4XRMbYxH4572JFG067z+FqBIf6Zp/d+0581glkvvWMFw==", + "node_modules/is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", "dev": true, - "dependencies": { - "browserslist": "^4.16.6", - "caniuse-api": "^3.0.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=0.10.0" } }, - "node_modules/postcss-reduce-transforms": { - "version": "5.1.0", - "resolved": 
"https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", - "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "node": ">=0.10.0" } }, - "node_modules/postcss-reporter": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/postcss-reporter/-/postcss-reporter-7.0.4.tgz", - "integrity": "sha512-jY/fnpGSin7kwJeunXbY35STp5O3VIxSFdjee5JkoPQ+FfGH5JW3N+Xe9oAPcL9UkjWjkK+JC72o8XH4XXKdhw==", + "node_modules/is-regex": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", + "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", "dev": true, "dependencies": { - "lodash.difference": "^4.5.0", - "lodash.forown": "^4.4.0", - "lodash.get": "^4.4.2", - "lodash.groupby": "^4.6.0", - "lodash.sortby": "^4.7.0", - "picocolors": "^1.0.0" + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" }, "engines": { - "node": ">=10" + "node": ">= 0.4" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.1.0" + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-resolve-nested-selector": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/postcss-resolve-nested-selector/-/postcss-resolve-nested-selector-0.1.1.tgz", - "integrity": "sha1-Kcy8fDfe36wwTp//C/FZaz9qDk4=", - "dev": true + "node_modules/is-regexp": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-2.1.0.tgz", + "integrity": "sha512-OZ4IlER3zmRIoB9AqNhEggVxqIH4ofDns5nRrPS6yQxXE1TPCUpFznBfRQmQa8uC+pXqjMnukiJBxCisIxiLGA==", + "dev": true, + "engines": { + "node": ">=6" + } }, - "node_modules/postcss-safe-parser": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-6.0.0.tgz", - "integrity": "sha512-FARHN8pwH+WiS2OPCxJI8FuRJpTVnn6ZNFiqAM2aeW2LwTHWWmWgIyKC6cUo0L8aeKiF/14MNvnpls6R2PBeMQ==", + "node_modules/is-shared-array-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.1.tgz", + "integrity": "sha512-IU0NmyknYZN0rChcKhRO1X8LYz5Isj/Fsqh8NJOSf+N/hCOTwy29F32Ik7a+QszE63IdvmwdTPDd6cZ5pg4cwA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", "dev": true, "engines": { - "node": ">=12.0" + "node": ">=8" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.3.3" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/postcss-selector-parser": { - "version": "6.0.9", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.9.tgz", - "integrity": 
"sha512-UO3SgnZOVTwu4kyLR22UQ1xZh086RyNZppb7lLAKBFK8a32ttG5i87Y/P3+2bRSjZNyJ1B7hfFNo273tKe9YxQ==", + "node_modules/is-string": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", + "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", "dev": true, "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" + "has-tostringtag": "^1.0.0" }, "engines": { - "node": ">=4" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-svgo": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz", - "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", + "node_modules/is-symbol": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", + "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", "dev": true, "dependencies": { - "postcss-value-parser": "^4.2.0", - "svgo": "^2.7.0" + "has-symbols": "^1.0.2" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">= 0.4" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-unique-selectors": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.0.tgz", - "integrity": "sha512-LmUhgGobtpeVJJHuogzjLRwJlN7VH+BL5c9GKMVJSS/ejoyePZkXvNsYUtk//F6vKOGK86gfRS0xH7fXQSDtvA==", + "node_modules/is-weakref": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", + "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", "dev": true, "dependencies": { - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" + "call-bind": "^1.0.2" }, - "peerDependencies": { - "postcss": "^8.2.15" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/postcss-value-parser": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", "dev": true }, - "node_modules/pretty-hrtime": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz", - "integrity": "sha1-t+PqQkNaTJsnWdmeDyAesZWALuE=", + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", "dev": true, "engines": { - "node": ">= 0.8" + "node": ">=0.10.0" } }, - "node_modules/punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", "dev": 
true, + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, "engines": { - "node": ">=6" + "node": ">= 10.13.0" } }, - "node_modules/queue-microtask": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.2.tgz", - "integrity": "sha512-dB15eXv3p2jDlbOiNLyMabYg1/sXvppd8DP2J3EOCQ0AkuSXCW2tP7mnVouVLJKgUMY6yP0kcQDVpLCN13h4Xg==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/quick-lru": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz", - "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", + "node_modules/jest-worker/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, "engines": { "node": ">=8" } }, - "node_modules/read-cache": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", - "integrity": "sha1-5mTvMRYRZsl1HNvo28+GtftY93Q=", + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, "dependencies": { - "pify": "^2.3.0" + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" } }, - "node_modules/read-pkg": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", - "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", - "dev": true, - "dependencies": { - "@types/normalize-package-data": "^2.4.0", - "normalize-package-data": "^2.5.0", - "parse-json": "^5.0.0", - "type-fest": "^0.6.0" - }, - "engines": { - "node": ">=8" - } + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true }, - "node_modules/read-pkg-up": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", - "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", "dev": true, "dependencies": { - "find-up": "^4.1.0", - "read-pkg": "^5.2.0", - "type-fest": "^0.8.1" - }, - "engines": { - "node": ">=8" + "argparse": "^1.0.7", + "esprima": "^4.0.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/read-pkg-up/node_modules/type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": 
"sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true, - "engines": { - "node": ">=8" + "bin": { + "js-yaml": "bin/js-yaml.js" } }, - "node_modules/read-pkg/node_modules/hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "node_modules/json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", "dev": true }, - "node_modules/read-pkg/node_modules/normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "dependencies": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true }, - "node_modules/read-pkg/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true, - "bin": { - "semver": "bin/semver" - } + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true }, - "node_modules/read-pkg/node_modules/type-fest": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", - "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", - "dev": true, - "engines": { - "node": ">=8" - } + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", + "dev": true }, - "node_modules/readdirp": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", - "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", + "node_modules/json5": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", + "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", "dev": true, "dependencies": { - "picomatch": "^2.2.1" + "minimist": "^1.2.0" }, - "engines": { - "node": ">=8.10.0" + "bin": { + "json5": "lib/cli.js" } }, - "node_modules/redent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", - "integrity": 
"sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "node_modules/jsx-ast-utils": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.2.1.tgz", + "integrity": "sha512-uP5vu8xfy2F9A6LGC22KO7e2/vGTS1MhP+18f++ZNlf0Ohaxbc9nIEwHAsejlJKyzfZzU5UIhe5ItYkitcZnZA==", "dev": true, "dependencies": { - "indent-string": "^4.0.0", - "strip-indent": "^3.0.0" + "array-includes": "^3.1.3", + "object.assign": "^4.1.2" }, "engines": { - "node": ">=8" + "node": ">=4.0" } }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "node_modules/klona": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.5.tgz", + "integrity": "sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ==", "dev": true, "engines": { - "node": ">=0.10.0" + "node": ">= 8" } }, - "node_modules/require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "node_modules/known-css-properties": { + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/known-css-properties/-/known-css-properties-0.24.0.tgz", + "integrity": "sha512-RTSoaUAfLvpR357vWzAz/50Q/BmHfmE6ETSWfutT0AJiw10e6CmcdYRQJlLRd95B53D0Y2aD1jSxD3V3ySF+PA==", + "dev": true + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, "engines": { - "node": ">=0.10.0" + "node": ">= 0.8.0" } }, - "node_modules/resolve": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", - "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "node_modules/load-json-file": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", + "integrity": "sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw==", "dev": true, "dependencies": { - "is-core-module": "^2.2.0", - "path-parse": "^1.0.6" + "graceful-fs": "^4.1.15", + "parse-json": "^4.0.0", + "pify": "^4.0.1", + "strip-bom": "^3.0.0", + "type-fest": "^0.3.0" }, - "funding": { - "url": "https://github.com/sponsors/ljharb" + "engines": { + "node": ">=6" } }, - "node_modules/resolve-from": { + "node_modules/load-json-file/node_modules/parse-json": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", "dev": true, + 
"dependencies": { + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" + }, "engines": { "node": ">=4" } }, - "node_modules/reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "node_modules/load-json-file/node_modules/type-fest": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", + "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", "dev": true, "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" - } - }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": ">=6" } }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "node_modules/loader-runner": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.2.0.tgz", + "integrity": "sha512-92+huvxMvYlMzMt0iIOukcwYBFpkYJdpl2xsZ7LrlayO7E8SOv+JJUEK17B/dJIHAOLMfh2dZZ/Y18WgmGtYNw==", "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "queue-microtask": "^1.2.2" + "engines": { + "node": ">=6.11.5" } }, - "node_modules/sass": { - "version": "1.49.9", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.49.9.tgz", - "integrity": "sha512-YlYWkkHP9fbwaFRZQRXgDi3mXZShslVmmo+FVK3kHLUELHHEYrCmL1x6IUjC7wLS6VuJSAFXRQS/DxdsC4xL1A==", + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, "dependencies": { - "chokidar": ">=3.0.0 <4.0.0", - "immutable": "^4.0.0", - "source-map-js": ">=0.6.2 <2.0.0" - }, - "bin": { - "sass": "sass.js" + "p-locate": "^4.1.0" }, "engines": { - "node": ">=12.0.0" + "node": ">=8" } }, - "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", "dev": true }, - "node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "node_modules/lodash.truncate": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", + 
"integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=", + "dev": true + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", "dev": true, - "engines": { - "node": ">=8" + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" } }, - "node_modules/slice-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", - "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", "dev": true, "dependencies": { - "ansi-styles": "^4.0.0", - "astral-regex": "^2.0.0", - "is-fullwidth-code-point": "^3.0.0" + "yallist": "^4.0.0" }, "engines": { "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, - "node_modules/slice-ansi/node_modules/ansi-styles": { + "node_modules/map-obj": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz", + "integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==", "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, "engines": { "node": ">=8" }, "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/slice-ansi/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "node_modules/mathml-tag-names": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/mathml-tag-names/-/mathml-tag-names-2.1.3.tgz", + "integrity": "sha512-APMBEanjybaPzUrfqU0IMU5I0AswKMH7k8OTLs0vvV4KZpExkTkY87nR/zpbuTPj+gARop7aGUbl11pnDfW6xg==", "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/slice-ansi/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/spdx-correct": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", - "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==", + "node_modules/meow": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-9.0.0.tgz", + "integrity": "sha512-+obSblOQmRhcyBt62furQqRAQpNyWXo8BuQ5bN7dG8wmwQ+vwHKp/rCFD4CrTP8CsDQD1sjoZ94K417XEUk8IQ==", "dev": true, "dependencies": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" + "@types/minimist": "^1.2.0", + "camelcase-keys": "^6.2.2", + "decamelize": "^1.2.0", + "decamelize-keys": "^1.1.0", + "hard-rejection": "^2.1.0", + "minimist-options": "4.1.0", + "normalize-package-data": "^3.0.0", + "read-pkg-up": "^7.0.1", + "redent": "^3.0.0", + "trim-newlines": "^3.0.0", + "type-fest": "^0.18.0", + "yargs-parser": "^20.2.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", "dev": true }, - "node_modules/spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", "dev": true, - "dependencies": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" + "engines": { + "node": ">= 8" } }, - "node_modules/spdx-license-ids": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.9.tgz", - "integrity": "sha512-Ki212dKK4ogX+xDo4CtOZBVIwhsKBEfsEEcwmJfLQzirgc2jIWdzg40Unxz/HzEUqM1WFzVlQSMF9kZZ2HboLQ==", - "dev": true - }, - "node_modules/specificity": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/specificity/-/specificity-0.4.1.tgz", - "integrity": "sha512-1klA3Gi5PD1Wv9Q0wUoOQN1IWAuPu0D1U03ThXTr0cJ20+/iq2tHSDnK7Kk/0LXJ1ztUB2/1Os0wKmfyNgUQfg==", + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", "dev": true, - "bin": { - "specificity": "bin/specificity" + "engines": { + "node": ">= 0.6" } }, - "node_modules/stable": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", - "dev": true - }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "dev": true, "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" + "mime-db": "1.52.0" }, "engines": { - "node": ">=8" + "node": ">= 0.6" } }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", "dev": true, - "dependencies": { - "ansi-regex": "^5.0.1" - }, "engines": { - "node": ">=8" + "node": ">=6" } }, - "node_modules/strip-indent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", - "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", "dev": true, - "dependencies": { - "min-indent": "^1.0.0" - }, "engines": { - "node": ">=8" + "node": ">=4" } }, - "node_modules/style-search": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/style-search/-/style-search-0.1.0.tgz", - "integrity": "sha1-eVjHk+R+MuB9K1yv5cC/jhLneQI=", - "dev": true - }, - "node_modules/stylehacks": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.0.tgz", - "integrity": "sha512-SzLmvHQTrIWfSgljkQCw2++C9+Ne91d/6Sp92I8c5uHTcy/PgeHamwITIbBW9wnFTY/3ZfSXR9HIL6Ikqmcu6Q==", + "node_modules/mini-css-extract-plugin": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.6.0.tgz", + "integrity": "sha512-ndG8nxCEnAemsg4FSgS+yNyHKgkTB4nPKqCOgh65j3/30qqC5RaSQQXMm++Y6sb6E1zRSxPkztj9fqxhS1Eo6w==", "dev": true, "dependencies": { - "browserslist": "^4.16.6", - "postcss-selector-parser": "^6.0.4" + "schema-utils": "^4.0.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" }, "peerDependencies": { - "postcss": "^8.2.15" + "webpack": "^5.0.0" } }, - "node_modules/stylelint": { - "version": "14.5.3", - "resolved": "https://registry.npmjs.org/stylelint/-/stylelint-14.5.3.tgz", - "integrity": "sha512-omHETL+kGHR+fCXFK1SkZD/A+emCP9esggAdWEl8GPjTNeyRYj+H6uetRDcU+7E451zwWiUYGVAX+lApsAZgsQ==", + "node_modules/mini-css-extract-plugin/node_modules/schema-utils": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", + "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", "dev": true, "dependencies": { - "balanced-match": "^2.0.0", - "colord": "^2.9.2", - "cosmiconfig": "^7.0.1", - "css-functions-list": "^3.0.1", - "debug": "^4.3.3", - 
"execall": "^2.0.0", - "fast-glob": "^3.2.11", - "fastest-levenshtein": "^1.0.12", - "file-entry-cache": "^6.0.1", - "get-stdin": "^8.0.0", - "global-modules": "^2.0.0", - "globby": "^11.1.0", - "globjoin": "^0.1.4", - "html-tags": "^3.1.0", - "ignore": "^5.2.0", - "import-lazy": "^4.0.0", - "imurmurhash": "^0.1.4", - "is-plain-object": "^5.0.0", - "known-css-properties": "^0.24.0", - "mathml-tag-names": "^2.1.3", - "meow": "^9.0.0", - "micromatch": "^4.0.4", - "normalize-path": "^3.0.0", - "normalize-selector": "^0.2.0", - "picocolors": "^1.0.0", - "postcss": "^8.4.6", - "postcss-media-query-parser": "^0.2.3", - "postcss-resolve-nested-selector": "^0.1.1", - "postcss-safe-parser": "^6.0.0", - "postcss-selector-parser": "^6.0.9", - "postcss-value-parser": "^4.2.0", - "resolve-from": "^5.0.0", - "specificity": "^0.4.1", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "style-search": "^0.1.0", - "supports-hyperlinks": "^2.2.0", - "svg-tags": "^1.0.0", - "table": "^6.8.0", - "v8-compile-cache": "^2.3.0", - "write-file-atomic": "^4.0.1" - }, - "bin": { - "stylelint": "bin/stylelint.js" + "@types/json-schema": "^7.0.9", + "ajv": "^8.8.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.0.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node": ">= 12.13.0" }, "funding": { "type": "opencollective", - "url": "https://opencollective.com/stylelint" - } - }, - "node_modules/stylelint-config-recommended": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-7.0.0.tgz", - "integrity": "sha512-yGn84Bf/q41J4luis1AZ95gj0EQwRX8lWmGmBwkwBNSkpGSpl66XcPTulxGa/Z91aPoNGuIGBmFkcM1MejMo9Q==", - "dev": true, - "peerDependencies": { - "stylelint": "^14.4.0" + "url": "https://opencollective.com/webpack" } }, - "node_modules/stylelint-config-standard": { - "version": "25.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-25.0.0.tgz", - "integrity": "sha512-21HnP3VSpaT1wFjFvv9VjvOGDtAviv47uTp3uFmzcN+3Lt+RYRv6oAplLaV51Kf792JSxJ6svCJh/G18E9VnCA==", + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, "dependencies": { - "stylelint-config-recommended": "^7.0.0" + "brace-expansion": "^1.1.7" }, - "peerDependencies": { - "stylelint": "^14.4.0" + "engines": { + "node": "*" } }, - "node_modules/stylelint-scss": { + "node_modules/minimist": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", + "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", + "dev": true + }, + "node_modules/minimist-options": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/stylelint-scss/-/stylelint-scss-4.1.0.tgz", - "integrity": "sha512-BNYTo7MMamhFOlcaAWp2dMpjg6hPyM/FFqfDIYzmYVLMmQJqc8lWRIiTqP4UX5bresj9Vo0dKC6odSh43VP2NA==", + "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz", + "integrity": "sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==", "dev": true, "dependencies": { - "lodash": "^4.17.21", - "postcss-media-query-parser": "^0.2.3", - "postcss-resolve-nested-selector": "^0.1.1", - "postcss-selector-parser": "^6.0.6", - "postcss-value-parser": "^4.1.0" + "arrify": "^1.0.1", + "is-plain-obj": 
"^1.1.0", + "kind-of": "^6.0.3" }, - "peerDependencies": { - "stylelint": "^14.0.0" + "engines": { + "node": ">= 6" } }, - "node_modules/stylelint/node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "node_modules/minimist-options/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", "dev": true, - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=0.10.0" } }, - "node_modules/stylelint/node_modules/resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/nanoid": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.2.tgz", + "integrity": "sha512-CuHBogktKwpm5g2sRgv83jEy2ijFzBwMoYA60orPDR7ynsLijJDqgsi4RDGj3OJpy3Ieb+LYwiRmIOGyytgITA==", "dev": true, + "bin": { + "nanoid": "bin/nanoid.cjs" + }, "engines": { - "node": ">=8" + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, - "node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", + "dev": true + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true + }, + "node_modules/node-releases": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.2.tgz", + "integrity": "sha512-XxYDdcQ6eKqp/YjI+tb2C5WM2LgjnZrfYg4vgQt49EK268b6gYCHsBLrK2qvJo4FmCtqmKezb0WZFK4fkrZNsg==", + "dev": true + }, + "node_modules/normalize-package-data": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.3.tgz", + "integrity": "sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==", "dev": true, "dependencies": { - "has-flag": "^3.0.0" + "hosted-git-info": "^4.0.1", + "is-core-module": "^2.5.0", + "semver": "^7.3.4", + "validate-npm-package-license": "^3.0.1" }, "engines": { - "node": ">=4" + "node": ">=10" } }, - "node_modules/supports-hyperlinks": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.2.0.tgz", - "integrity": 
"sha512-6sXEzV5+I5j8Bmq9/vUphGRM/RJNT9SCURJLjwfOg51heRtguGWDzcaBlgAzKhQa0EVNpPEKzQuBwZ8S8WaCeQ==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0", - "supports-color": "^7.0.0" - }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, "engines": { - "node": ">=8" + "node": ">=0.10.0" } }, - "node_modules/supports-hyperlinks/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=", "dev": true, "engines": { - "node": ">=8" + "node": ">=0.10.0" } }, - "node_modules/supports-hyperlinks/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "node_modules/normalize-selector": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/normalize-selector/-/normalize-selector-0.2.0.tgz", + "integrity": "sha1-0LFF62kRicY6eNIB3E/bEpPvDAM=", + "dev": true + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", "dev": true, "dependencies": { - "has-flag": "^4.0.0" + "path-key": "^3.0.0" }, "engines": { "node": ">=8" } }, - "node_modules/svg-tags": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", - "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=", - "dev": true + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } }, - "node_modules/svgo": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", - "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", + "node_modules/object-inspect": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.0.tgz", + "integrity": "sha512-Ho2z80bVIvJloH+YzRmpZVQe87+qASmBUKZDWgx9cu+KDrX2ZDH/3tMy+gXbZETVGs2M8YdxObOh7XAtim9Y0g==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", "dev": true, - "dependencies": { - "@trysound/sax": "0.2.0", - "commander": "^7.2.0", - "css-select": "^4.1.3", - "css-tree": "^1.1.3", - "csso": "^4.2.0", - "picocolors": "^1.0.0", - "stable": "^0.1.8" - }, - "bin": { - "svgo": "bin/svgo" - }, "engines": { - "node": ">=10.13.0" + "node": ">= 0.4" } }, - "node_modules/table": { - "version": "6.8.0", - 
"resolved": "https://registry.npmjs.org/table/-/table-6.8.0.tgz", - "integrity": "sha512-s/fitrbVeEyHKFa7mFdkuQMWlH1Wgw/yEXMt5xACT4ZpzWFluehAxRtUUQKPuWhaLAWhFcVx6w3oC8VKaUfPGA==", + "node_modules/object.assign": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", "dev": true, "dependencies": { - "ajv": "^8.0.1", - "lodash.truncate": "^4.4.2", - "slice-ansi": "^4.0.0", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1" + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" }, "engines": { - "node": ">=10.0.0" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/timsort": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", - "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=", - "dev": true - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "node_modules/object.entries": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.5.tgz", + "integrity": "sha512-TyxmjUoZggd4OrrU1W66FMDG6CuqJxsFvymeyXI51+vQLN67zYfZseptRge703kKQdo4uccgAKebXFcRCzk4+g==", "dev": true, "dependencies": { - "is-number": "^7.0.0" + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1" }, "engines": { - "node": ">=8.0" + "node": ">= 0.4" } }, - "node_modules/trim-newlines": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz", - "integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==", + "node_modules/object.fromentries": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.5.tgz", + "integrity": "sha512-CAyG5mWQRRiBU57Re4FKoTBjXfDoNwdFVH2Y1tS9PqCsfUTymAohOkEMSG3aRNKmv4lV3O7p1et7c187q6bynw==", "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1" + }, "engines": { - "node": ">=8" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/type-fest": { - "version": "0.18.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz", - "integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==", + "node_modules/object.hasown": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.0.tgz", + "integrity": "sha512-MhjYRfj3GBlhSkDHo6QmvgjRLXQ2zndabdf3nX0yTyZK9rPfxb6uRpAac8HXNLy1GpqWtZ81Qh4v3uOls2sRAg==", "dev": true, - "engines": { - "node": ">=10" + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "node_modules/object.values": { + "version": "1.1.5", + 
"resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.5.tgz", + "integrity": "sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==", "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1" + }, "engines": { - "node": ">= 10.0.0" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", "dev": true, "dependencies": { - "punycode": "^2.1.0" + "wrappy": "1" } }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", - "dev": true - }, - "node_modules/v8-compile-cache": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", - "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==", - "dev": true - }, - "node_modules/validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "dev": true, "dependencies": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "node_modules/optionator": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", + "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", "dev": true, "dependencies": { - "isexe": "^2.0.0" + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.3" }, - "bin": { - "which": "bin/which" + "engines": { + "node": ">= 0.8.0" } }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" + "p-try": "^2.0.0" }, 
"engines": { - "node": ">=10" + "node": ">=6" }, "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, "dependencies": { - "color-convert": "^2.0.1" + "p-limit": "^2.2.0" }, "engines": { "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/wrap-ansi/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, "engines": { - "node": ">=7.0.0" + "node": ">=6" } }, - "node_modules/wrap-ansi/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "dev": true - }, - "node_modules/write-file-atomic": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.1.tgz", - "integrity": "sha512-nSKUxgAbyioruk6hU87QzVbY279oYT6uiwgDoujth2ju4mJ+TZau7SQBhtbTmUyuNYTuXnSyRn66FV0+eCgcrQ==", + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", "dev": true, "dependencies": { - "imurmurhash": "^0.1.4", - "signal-exit": "^3.0.7" + "callsites": "^3.0.0" }, "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16" + "node": ">=6" } }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, "engines": { - "node": ">=10" + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/yallist": { + "node_modules/path-exists": { "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, - "node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true, "engines": { - "node": ">= 6" + "node": ">=8" } }, - "node_modules/yargs": { - "version": "17.3.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.3.0.tgz", - "integrity": "sha512-GQl1pWyDoGptFPJx9b9L6kmR33TGusZvXIZUT+BOz9f7X2L94oeAskFYLEg/FkhV06zZPBYLvLZRWeYId29lew==", + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", "dev": true, - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.0.0" - }, "engines": { - "node": ">=12" + "node": ">=0.10.0" } }, - "node_modules/yargs-parser": { - "version": "20.2.6", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.6.tgz", - "integrity": "sha512-AP1+fQIWSM/sMiET8fyayjx/J+JmTPt2Mr0FkrgqB4todtfa53sOsrSAcIrJRD5XS20bKUwaDIuMkWKCEiQLKA==", + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true, "engines": { - "node": ">=10" + "node": ">=8" } }, - "node_modules/yargs/node_modules/yargs-parser": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.0.0.tgz", - "integrity": "sha512-z9kApYUOCwoeZ78rfRYYWdiU/iNL6mwwYlkkZfJoyMR1xps+NEBX5X7XmRpxkZHhXJ6+Ey00IwKxBBSW9FIjyA==", + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", "dev": true, "engines": { - "node": ">=12" + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + 
"integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-conf": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-3.1.0.tgz", + "integrity": "sha512-m0OTbR/5VPNPqO1ph6Fqbj7Hv6QU7gR/tQW40ZqrL1rjgCU85W6C1bJn0BItuJqnR98PWzw7Z8hHeChD1WrgdQ==", + "dev": true, + "dependencies": { + "find-up": "^3.0.0", + "load-json-file": "^5.2.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-conf/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-conf/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-conf/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-conf/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-up": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-2.0.0.tgz", + "integrity": "sha1-yBmscoBZpGHKscOImivjxJoATX8=", + "dev": true, + "dependencies": { + "find-up": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-up/node_modules/find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true, + "dependencies": { + "locate-path": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-up/node_modules/locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true, + "dependencies": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-up/node_modules/p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "dependencies": { + "p-try": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + 
"node_modules/pkg-up/node_modules/p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true, + "dependencies": { + "p-limit": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-up/node_modules/p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-up/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss": { + "version": "8.4.12", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.12.tgz", + "integrity": "sha512-lg6eITwYe9v6Hr5CncVbK70SoioNQIq81nsaG86ev5hAidQvmOeETBqs7jm43K2F5/Ley3ytDtriImV6TpNiSg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + } + ], + "dependencies": { + "nanoid": "^3.3.1", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-loader": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-6.2.1.tgz", + "integrity": "sha512-WbbYpmAaKcux/P66bZ40bpWsBucjx/TTgVVzRZ9yUO8yQfVBlameJ0ZGVaPfH64hNSBh63a+ICP5nqOpBA0w+Q==", + "dev": true, + "dependencies": { + "cosmiconfig": "^7.0.0", + "klona": "^2.0.5", + "semver": "^7.3.5" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-media-query-parser": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/postcss-media-query-parser/-/postcss-media-query-parser-0.2.3.tgz", + "integrity": "sha1-J7Ocb02U+Bsac7j3Y1HGCeXO8kQ=", + "dev": true + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz", + "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz", + "integrity": "sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ==", + "dev": true, + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz", + "integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==", + "dev": true, + "dependencies": { + 
"postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "dev": true, + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-resolve-nested-selector": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/postcss-resolve-nested-selector/-/postcss-resolve-nested-selector-0.1.1.tgz", + "integrity": "sha1-Kcy8fDfe36wwTp//C/FZaz9qDk4=", + "dev": true + }, + "node_modules/postcss-safe-parser": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-6.0.0.tgz", + "integrity": "sha512-FARHN8pwH+WiS2OPCxJI8FuRJpTVnn6ZNFiqAM2aeW2LwTHWWmWgIyKC6cUo0L8aeKiF/14MNvnpls6R2PBeMQ==", + "dev": true, + "engines": { + "node": ">=12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.3.3" + } + }, + "node_modules/postcss-scss": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-scss/-/postcss-scss-4.0.3.tgz", + "integrity": "sha512-j4KxzWovfdHsyxwl1BxkUal/O4uirvHgdzMKS1aWJBAV0qh2qj5qAZqpeBfVUYGWv+4iK9Az7SPyZ4fyNju1uA==", + "dev": true, + "engines": { + "node": ">=12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.3.3" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.9", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.9.tgz", + "integrity": "sha512-UO3SgnZOVTwu4kyLR22UQ1xZh086RyNZppb7lLAKBFK8a32ttG5i87Y/P3+2bRSjZNyJ1B7hfFNo273tKe9YxQ==", + "dev": true, + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dev": true, + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/punycode": { + "version": "2.1.1", + "resolved": 
"https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/quick-lru": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz", + "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "dev": true + }, + "node_modules/read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "dependencies": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg-up": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", + "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "dev": true, + "dependencies": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg-up/node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg/node_modules/hosted-git-info": { + "version": "2.8.9", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true + }, + "node_modules/read-pkg/node_modules/normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": 
"^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "node_modules/read-pkg/node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true, + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/read-pkg/node_modules/type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", + "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/regexp.prototype.flags": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.1.tgz", + "integrity": "sha512-pMR7hBVUUGI7PMA37m2ofIdQCsomVnas+Jn5UPGAHQ+/LlwKm/aTLJHdasmHRzlfeZwHiAOaRSo2rbBDm3nNUQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regexpp": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", + "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.0", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.0.tgz", + "integrity": "sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw==", + "dev": true, + "dependencies": { + "is-core-module": "^2.8.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", 
+ "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "node_modules/sass": { + "version": "1.49.10", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.49.10.tgz", + "integrity": "sha512-w37zfWJwKu4I78U4z63u1mmgoncq+v3iOB4yzQMPyAPVHHawaQSnu9C9ysGQnZEhW609jkcLioJcMCqm75JMdg==", + "dev": true, + "dependencies": { + "chokidar": ">=3.0.0 <4.0.0", + "immutable": "^4.0.0", + "source-map-js": ">=0.6.2 <2.0.0" + }, + "bin": { + "sass": "sass.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/sass-loader": { + "version": "12.6.0", + "resolved": "https://registry.npmjs.org/sass-loader/-/sass-loader-12.6.0.tgz", + "integrity": "sha512-oLTaH0YCtX4cfnJZxKSLAyglED0naiYfNG1iXfU5w1LNZ+ukoA5DtyDIN5zmKVZwYNJP4KRc5Y3hkWga+7tYfA==", + "dev": true, + "dependencies": { + "klona": "^2.0.4", + "neo-async": "^2.6.2" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "fibers": ">= 3.1.0", + "node-sass": "^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0", + "sass": "^1.3.0", + "sass-embedded": "*", + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "fibers": { + "optional": true + }, + "node-sass": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + } + } + }, + "node_modules/sass/node_modules/anymatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", + "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/sass/node_modules/binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/sass/node_modules/braces": { + "version": "3.0.2", + "resolved": 
"https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/sass/node_modules/chokidar": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/sass/node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/sass/node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/sass/node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/sass/node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/sass/node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/sass/node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/schema-utils": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", + "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + 
}, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/schema-utils/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/schema-utils/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "dev": true, + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/schema-utils/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/semver": { + "version": "7.3.5", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", + "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "dev": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dev": true, + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallow-clone/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + 
"integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/slice-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", + "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" + } + }, + "node_modules/slice-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/slice-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/slice-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/spdx-correct": { + "version": 
"3.1.1", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", + "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==", + "dev": true, + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-exceptions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", + "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", + "dev": true + }, + "node_modules/spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "dev": true, + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.11.tgz", + "integrity": "sha512-Ctl2BrFiM0X3MANYgj3CkygxhRmr9mi6xhejbdO960nF6EDJApTYpn0BQnDKlnNBULKiCN1n3w9EBkHK8ZWg+g==", + "dev": true + }, + "node_modules/specificity": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/specificity/-/specificity-0.4.1.tgz", + "integrity": "sha512-1klA3Gi5PD1Wv9Q0wUoOQN1IWAuPu0D1U03ThXTr0cJ20+/iq2tHSDnK7Kk/0LXJ1ztUB2/1Os0wKmfyNgUQfg==", + "dev": true, + "bin": { + "specificity": "bin/specificity" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "node_modules/standard": { + "version": "16.0.4", + "resolved": "https://registry.npmjs.org/standard/-/standard-16.0.4.tgz", + "integrity": "sha512-2AGI874RNClW4xUdM+bg1LRXVlYLzTNEkHmTG5mhyn45OhbgwA+6znowkOGYy+WMb5HRyELvtNy39kcdMQMcYQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "eslint": "~7.18.0", + "eslint-config-standard": "16.0.3", + "eslint-config-standard-jsx": "10.0.0", + "eslint-plugin-import": "~2.24.2", + "eslint-plugin-node": "~11.1.0", + "eslint-plugin-promise": "~5.1.0", + "eslint-plugin-react": "~7.25.1", + "standard-engine": "^14.0.1" + }, + "bin": { + "standard": "bin/cmd.js" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/standard-engine": { + "version": "14.0.1", + "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-14.0.1.tgz", + "integrity": "sha512-7FEzDwmHDOGva7r9ifOzD3BGdTbA7ujJ50afLVdW/tK14zQEptJjbFuUfn50irqdHDcTbNh0DTIoMPynMCXb0Q==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "get-stdin": "^8.0.0", + "minimist": "^1.2.5", + "pkg-conf": "^3.1.0", + "xdg-basedir": "^4.0.0" + }, + "engines": { + "node": ">=8.10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.7.tgz", + "integrity": "sha512-f48okCX7JiwVi1NXCVWcFnZgADDC/n2vePlQ/KUCNqCikLLilQvwjMO8+BHVKvgzH0JB0J9LEPgxOGT02RoETg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1", + "get-intrinsic": "^1.1.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.3", + "regexp.prototype.flags": "^1.4.1", + "side-channel": "^1.0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz", + "integrity": "sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz", + "integrity": "sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-search": { + "version": "0.1.0", + 
"resolved": "https://registry.npmjs.org/style-search/-/style-search-0.1.0.tgz", + "integrity": "sha1-eVjHk+R+MuB9K1yv5cC/jhLneQI=", + "dev": true + }, + "node_modules/stylelint": { + "version": "14.6.1", + "resolved": "https://registry.npmjs.org/stylelint/-/stylelint-14.6.1.tgz", + "integrity": "sha512-FfNdvZUZdzh9KDQxDnO7Opp+prKh8OQVuSW8S13cBtxrooCbm6J6royhUeb++53WPMt04VB+ZbOz/QmzAijs6Q==", + "dev": true, + "dependencies": { + "balanced-match": "^2.0.0", + "colord": "^2.9.2", + "cosmiconfig": "^7.0.1", + "css-functions-list": "^3.0.1", + "debug": "^4.3.4", + "execall": "^2.0.0", + "fast-glob": "^3.2.11", + "fastest-levenshtein": "^1.0.12", + "file-entry-cache": "^6.0.1", + "get-stdin": "^8.0.0", + "global-modules": "^2.0.0", + "globby": "^11.1.0", + "globjoin": "^0.1.4", + "html-tags": "^3.1.0", + "ignore": "^5.2.0", + "import-lazy": "^4.0.0", + "imurmurhash": "^0.1.4", + "is-plain-object": "^5.0.0", + "known-css-properties": "^0.24.0", + "mathml-tag-names": "^2.1.3", + "meow": "^9.0.0", + "micromatch": "^4.0.4", + "normalize-path": "^3.0.0", + "normalize-selector": "^0.2.0", + "picocolors": "^1.0.0", + "postcss": "^8.4.12", + "postcss-media-query-parser": "^0.2.3", + "postcss-resolve-nested-selector": "^0.1.1", + "postcss-safe-parser": "^6.0.0", + "postcss-selector-parser": "^6.0.9", + "postcss-value-parser": "^4.2.0", + "resolve-from": "^5.0.0", + "specificity": "^0.4.1", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "style-search": "^0.1.0", + "supports-hyperlinks": "^2.2.0", + "svg-tags": "^1.0.0", + "table": "^6.8.0", + "v8-compile-cache": "^2.3.0", + "write-file-atomic": "^4.0.1" + }, + "bin": { + "stylelint": "bin/stylelint.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/stylelint" + } + }, + "node_modules/stylelint-config-recommended": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-6.0.0.tgz", + "integrity": "sha512-ZorSSdyMcxWpROYUvLEMm0vSZud2uB7tX1hzBZwvVY9SV/uly4AvvJPPhCcymZL3fcQhEQG5AELmrxWqtmzacw==", + "dev": true, + "peerDependencies": { + "stylelint": "^14.0.0" + } + }, + "node_modules/stylelint-config-recommended-scss": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/stylelint-config-recommended-scss/-/stylelint-config-recommended-scss-5.0.2.tgz", + "integrity": "sha512-b14BSZjcwW0hqbzm9b0S/ScN2+3CO3O4vcMNOw2KGf8lfVSwJ4p5TbNEXKwKl1+0FMtgRXZj6DqVUe/7nGnuBg==", + "dev": true, + "dependencies": { + "postcss-scss": "^4.0.2", + "stylelint-config-recommended": "^6.0.0", + "stylelint-scss": "^4.0.0" + }, + "peerDependencies": { + "stylelint": "^14.0.0" + } + }, + "node_modules/stylelint-config-standard": { + "version": "24.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-24.0.0.tgz", + "integrity": "sha512-+RtU7fbNT+VlNbdXJvnjc3USNPZRiRVp/d2DxOF/vBDDTi0kH5RX2Ny6errdtZJH3boO+bmqIYEllEmok4jiuw==", + "dev": true, + "dependencies": { + "stylelint-config-recommended": "^6.0.0" + }, + "peerDependencies": { + "stylelint": "^14.0.0" + } + }, + "node_modules/stylelint-config-standard-scss": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-standard-scss/-/stylelint-config-standard-scss-3.0.0.tgz", + "integrity": "sha512-zt3ZbzIbllN1iCmc94e4pDxqpkzeR6CJo5DDXzltshuXr+82B8ylHyMMARNnUYrZH80B7wgY7UkKTYCFM0UUyw==", + "dev": true, + "dependencies": { + "stylelint-config-recommended-scss": "^5.0.2", + 
"stylelint-config-standard": "^24.0.0" + }, + "peerDependencies": { + "stylelint": "^14.0.0" + } + }, + "node_modules/stylelint-scss": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylelint-scss/-/stylelint-scss-4.2.0.tgz", + "integrity": "sha512-HHHMVKJJ5RM9pPIbgJ/XA67h9H0407G68Rm69H4fzFbFkyDMcTV1Byep3qdze5+fJ3c0U7mJrbj6S0Fg072uZA==", + "dev": true, + "dependencies": { + "lodash": "^4.17.21", + "postcss-media-query-parser": "^0.2.3", + "postcss-resolve-nested-selector": "^0.1.1", + "postcss-selector-parser": "^6.0.6", + "postcss-value-parser": "^4.1.0" + }, + "peerDependencies": { + "stylelint": "^14.5.1" + } + }, + "node_modules/stylelint/node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/stylelint/node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/stylelint/node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dev": true, + "dependencies": { + "global-prefix": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/stylelint/node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dev": true, + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/stylelint/node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/stylelint/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stylelint/node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/stylelint/node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + 
"is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/supports-hyperlinks": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.2.0.tgz", + "integrity": "sha512-6sXEzV5+I5j8Bmq9/vUphGRM/RJNT9SCURJLjwfOg51heRtguGWDzcaBlgAzKhQa0EVNpPEKzQuBwZ8S8WaCeQ==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-tags": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", + "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=", + "dev": true + }, + "node_modules/table": { + "version": "6.8.0", + "resolved": "https://registry.npmjs.org/table/-/table-6.8.0.tgz", + "integrity": "sha512-s/fitrbVeEyHKFa7mFdkuQMWlH1Wgw/yEXMt5xACT4ZpzWFluehAxRtUUQKPuWhaLAWhFcVx6w3oC8VKaUfPGA==", + "dev": true, + "dependencies": { + "ajv": "^8.0.1", + "lodash.truncate": "^4.4.2", + "slice-ansi": "^4.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/terser": { + "version": "5.12.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.12.1.tgz", + "integrity": "sha512-NXbs+7nisos5E+yXwAD+y7zrcTkMqb0dEJxIGtSKPdCBzopf7ni4odPul2aechpV7EXNvOudYOX2bb5tln1jbQ==", + "dev": true, + "dependencies": { + "acorn": "^8.5.0", + "commander": "^2.20.0", + "source-map": "~0.7.2", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.1.tgz", + 
"integrity": "sha512-GvlZdT6wPQKbDNW/GDQzZFg/j4vKU96yl2q6mcUkzKOgW4gwf1Z8cZToUCrz31XHlPWH8MVb1r2tFtdDtTGJ7g==", + "dev": true, + "dependencies": { + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.0", + "source-map": "^0.6.1", + "terser": "^5.7.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true + }, + "node_modules/terser/node_modules/source-map": { + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz", + "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", + "dev": true + }, + "node_modules/trim-newlines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz", + "integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/tsconfig-paths": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz", + "integrity": "sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==", + "dev": true, + "dependencies": { + "@types/json5": "^0.0.29", + "json5": "^1.0.1", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.18.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz", + "integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unbox-primitive": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz", + "integrity": "sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==", + "dev": true, + "dependencies": { + "function-bind": "^1.1.1", + 
"has-bigints": "^1.0.1", + "has-symbols": "^1.0.2", + "which-boxed-primitive": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "node_modules/v8-compile-cache": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", + "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==", + "dev": true + }, + "node_modules/validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/watchpack": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.1.tgz", + "integrity": "sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA==", + "dev": true, + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack": { + "version": "5.70.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.70.0.tgz", + "integrity": "sha512-ZMWWy8CeuTTjCxbeaQI21xSswseF2oNOwc70QSKNePvmxE7XW36i7vpBMYZFAUHPwQiEbNGCEYIOOlyRbdGmxw==", + "dev": true, + "dependencies": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^0.0.51", + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/wasm-edit": "1.11.1", + "@webassemblyjs/wasm-parser": "1.11.1", + "acorn": "^8.4.1", + "acorn-import-assertions": "^1.7.6", + "browserslist": "^4.14.5", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.9.2", + "es-module-lexer": "^0.9.0", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.9", + "json-parse-better-errors": "^1.0.2", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.1.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.1.3", + "watchpack": "^2.3.1", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-cli": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-4.9.2.tgz", + "integrity": "sha512-m3/AACnBBzK/kMTcxWHcZFPrw/eQuY4Df1TxvIWfWM2x7mRqBQCqKEd96oCUa9jkapLBaFfRce33eGDb4Pr7YQ==", + "dev": true, + "dependencies": { + "@discoveryjs/json-ext": "^0.5.0", + "@webpack-cli/configtest": "^1.1.1", + "@webpack-cli/info": "^1.4.1", + "@webpack-cli/serve": "^1.6.1", + "colorette": "^2.0.14", + "commander": "^7.0.0", + "execa": "^5.0.0", + "fastest-levenshtein": 
"^1.0.12", + "import-local": "^3.0.2", + "interpret": "^2.2.0", + "rechoir": "^0.7.0", + "webpack-merge": "^5.7.3" + }, + "bin": { + "webpack-cli": "bin/cli.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "webpack": "4.x.x || 5.x.x" + }, + "peerDependenciesMeta": { + "@webpack-cli/generators": { + "optional": true + }, + "@webpack-cli/migrate": { + "optional": true + }, + "webpack-bundle-analyzer": { + "optional": true + }, + "webpack-dev-server": { + "optional": true + } + } + }, + "node_modules/webpack-cli/node_modules/interpret": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-2.2.0.tgz", + "integrity": "sha512-Ju0Bz/cEia55xDwUWEa8+olFpCiQoypjnQySseKtmjNrnps3P+xfpUmGr90T7yjlVJmOtybRvPXhKMbHr+fWnw==", + "dev": true, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/webpack-cli/node_modules/rechoir": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.7.1.tgz", + "integrity": "sha512-/njmZ8s1wVeR6pjTZ+0nCnv8SpZNRMT2D1RLOJQESlYFDBvwpTA4KWJpZ+sBJ4+vhjILRcK7JIFdGCdxEAAitg==", + "dev": true, + "dependencies": { + "resolve": "^1.9.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/webpack-merge": { + "version": "5.8.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.8.0.tgz", + "integrity": "sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==", + "dev": true, + "dependencies": { + "clone-deep": "^4.0.1", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "dev": true, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/which-boxed-primitive": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", + "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", + "dev": true, + "dependencies": { + "is-bigint": "^1.0.1", + "is-boolean-object": "^1.1.0", + "is-number-object": "^1.0.4", + "is-string": "^1.0.5", + "is-symbol": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/wildcard": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.0.tgz", + "integrity": "sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==", + "dev": true + }, + "node_modules/word-wrap": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", + "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + 
"node_modules/write-file-atomic": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.1.tgz", + "integrity": "sha512-nSKUxgAbyioruk6hU87QzVbY279oYT6uiwgDoujth2ju4mJ+TZau7SQBhtbTmUyuNYTuXnSyRn66FV0+eCgcrQ==", + "dev": true, + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16" + } + }, + "node_modules/xdg-basedir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", + "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true, + "engines": { + "node": ">=10" } } }, "dependencies": { "@babel/code-frame": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", - "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.16.7.tgz", + "integrity": "sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg==", + "dev": true, + "requires": { + "@babel/highlight": "^7.16.7" + } + }, + "@babel/helper-validator-identifier": { + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.16.7.tgz", + "integrity": "sha512-hsEnFemeiW4D08A5gUAZxLBTXpZ39P+a+DGDsHw1yxqyQ/jzFEnxf5uTEGp+3bzAbNOxU1paTgYS4ECU/IgfDw==", + "dev": true + }, + "@babel/highlight": { + "version": "7.16.10", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.10.tgz", + "integrity": "sha512-5FnTQLSLswEj6IkgVw5KusNUUFY9ZGqe/TRFnP/BKYHYgfh7tc+C7mwiy95/yNP7Dh9x580Vv8r7u7ZfTBFxdw==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.16.7", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + } + }, + "@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "dev": true + }, + "@eslint/eslintrc": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.3.0.tgz", + "integrity": "sha512-1JTKgrOKAHVivSvOYw+sJOunkBjUOvjqWk1DPja7ZFhIS2mX/4EgTT8M7eTK9jrKhL/FvXXEbQwIs3pg1xp3dg==", + "dev": true, + "requires": { + "ajv": "^6.12.4", + "debug": "^4.1.1", + "espree": "^7.3.0", + "globals": "^12.1.0", + "ignore": "^4.0.6", + "import-fresh": "^3.2.1", + "js-yaml": "^3.13.1", + "lodash": 
"^4.17.20", + "minimatch": "^3.0.4", + "strip-json-comments": "^3.1.1" + }, + "dependencies": { + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + } + } + }, + "@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "requires": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + } + }, + "@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true + }, + "@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "requires": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + } + }, + "@types/eslint": { + "version": "8.4.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.4.1.tgz", + "integrity": "sha512-GE44+DNEyxxh2Kc6ro/VkIj+9ma0pO0bwv9+uHSyBrikYOHr8zYcdPvnBOp1aw8s+CjRvuSx7CyWqRrNFQ59mA==", + "dev": true, + "requires": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "@types/eslint-scope": { + "version": "3.7.3", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.3.tgz", + "integrity": "sha512-PB3ldyrcnAicT35TWPs5IcwKD8S333HMaa2VVv4+wdvebJkjWuW/xESoB8IwRcog8HYVYamb1g/R31Qv5Bx03g==", + "dev": true, + "requires": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "@types/estree": { + "version": "0.0.51", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.51.tgz", + "integrity": "sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ==", + "dev": true + }, + "@types/json-schema": { + "version": "7.0.11", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", + "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", + "dev": true + }, + "@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=", + "dev": true + }, + "@types/minimist": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz", + "integrity": 
"sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==", + "dev": true + }, + "@types/node": { + "version": "17.0.23", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.23.tgz", + "integrity": "sha512-UxDxWn7dl97rKVeVS61vErvw086aCYhDLyvRQZ5Rk65rZKepaFdm53GeqXaKBuOhED4e9uWq34IC3TdSdJJ2Gw==", + "dev": true + }, + "@types/normalize-package-data": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz", + "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==", + "dev": true + }, + "@types/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", + "dev": true + }, + "@webassemblyjs/ast": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", + "integrity": "sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw==", "dev": true, "requires": { - "@babel/highlight": "^7.12.13" + "@webassemblyjs/helper-numbers": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1" } }, - "@babel/helper-validator-identifier": { - "version": "7.12.11", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz", - "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==", + "@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz", + "integrity": "sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ==", "dev": true }, - "@babel/highlight": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.8.tgz", - "integrity": "sha512-4vrIhfJyfNf+lCtXC2ck1rKSzDwciqF7IWFhXXrSOUC2O5DrVp+w4c6ed4AllTxhTkUP5x2tYj41VaxdVMMRDw==", + "@webassemblyjs/helper-api-error": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz", + "integrity": "sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg==", + "dev": true + }, + "@webassemblyjs/helper-buffer": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz", + "integrity": "sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA==", + "dev": true + }, + "@webassemblyjs/helper-numbers": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz", + "integrity": "sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ==", "dev": true, "requires": { - "@babel/helper-validator-identifier": "^7.12.11", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" + "@webassemblyjs/floating-point-hex-parser": "1.11.1", + "@webassemblyjs/helper-api-error": "1.11.1", + "@xtuc/long": "4.2.2" } }, - "@nodelib/fs.scandir": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz", - "integrity": 
"sha512-33g3pMJk3bg5nXbL/+CY6I2eJDzZAni49PfJnL5fghPTggPvBd/pFNSgJsdAgWptuFu7qq/ERvOYFlhvsLTCKA==", + "@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz", + "integrity": "sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q==", + "dev": true + }, + "@webassemblyjs/helper-wasm-section": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz", + "integrity": "sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg==", "dev": true, "requires": { - "@nodelib/fs.stat": "2.0.4", - "run-parallel": "^1.1.9" + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/helper-buffer": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1", + "@webassemblyjs/wasm-gen": "1.11.1" } }, - "@nodelib/fs.stat": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.4.tgz", - "integrity": "sha512-IYlHJA0clt2+Vg7bccq+TzRdJvv19c2INqBSsoOLp1je7xjtr7J26+WXR72MCdvU9q1qTzIWDfhMf+DRvQJK4Q==", + "@webassemblyjs/ieee754": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz", + "integrity": "sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ==", + "dev": true, + "requires": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "@webassemblyjs/leb128": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.1.tgz", + "integrity": "sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw==", + "dev": true, + "requires": { + "@xtuc/long": "4.2.2" + } + }, + "@webassemblyjs/utf8": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.1.tgz", + "integrity": "sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ==", + "dev": true + }, + "@webassemblyjs/wasm-edit": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz", + "integrity": "sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/helper-buffer": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1", + "@webassemblyjs/helper-wasm-section": "1.11.1", + "@webassemblyjs/wasm-gen": "1.11.1", + "@webassemblyjs/wasm-opt": "1.11.1", + "@webassemblyjs/wasm-parser": "1.11.1", + "@webassemblyjs/wast-printer": "1.11.1" + } + }, + "@webassemblyjs/wasm-gen": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz", + "integrity": "sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1", + "@webassemblyjs/ieee754": "1.11.1", + "@webassemblyjs/leb128": "1.11.1", + "@webassemblyjs/utf8": "1.11.1" + } + }, + "@webassemblyjs/wasm-opt": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz", + "integrity": "sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw==", + "dev": true, + "requires": { + "@webassemblyjs/ast": 
"1.11.1", + "@webassemblyjs/helper-buffer": "1.11.1", + "@webassemblyjs/wasm-gen": "1.11.1", + "@webassemblyjs/wasm-parser": "1.11.1" + } + }, + "@webassemblyjs/wasm-parser": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz", + "integrity": "sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/helper-api-error": "1.11.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.1", + "@webassemblyjs/ieee754": "1.11.1", + "@webassemblyjs/leb128": "1.11.1", + "@webassemblyjs/utf8": "1.11.1" + } + }, + "@webassemblyjs/wast-printer": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz", + "integrity": "sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.11.1", + "@xtuc/long": "4.2.2" + } + }, + "@webpack-cli/configtest": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@webpack-cli/configtest/-/configtest-1.1.1.tgz", + "integrity": "sha512-1FBc1f9G4P/AxMqIgfZgeOTuRnwZMten8E7zap5zgpPInnCrP8D4Q81+4CWIch8i/Nf7nXjP0v6CjjbHOrXhKg==", + "dev": true, + "requires": {} + }, + "@webpack-cli/info": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/@webpack-cli/info/-/info-1.4.1.tgz", + "integrity": "sha512-PKVGmazEq3oAo46Q63tpMr4HipI3OPfP7LiNOEJg963RMgT0rqheag28NCML0o3GIzA3DmxP1ZIAv9oTX1CUIA==", + "dev": true, + "requires": { + "envinfo": "^7.7.3" + } + }, + "@webpack-cli/serve": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/@webpack-cli/serve/-/serve-1.6.1.tgz", + "integrity": "sha512-gNGTiTrjEVQ0OcVnzsRSqTxaBSr+dmTfm+qJsCDluky8uhdLWep7Gcr62QsAKHTMxjCS/8nEITsmFAhfIx+QSw==", + "dev": true, + "requires": {} + }, + "@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "dev": true + }, + "@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "dev": true + }, + "acorn": { + "version": "8.7.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.7.0.tgz", + "integrity": "sha512-V/LGr1APy+PXIwKebEWrkZPwoeoF+w1jiOBUmuxuiUIaOHtob8Qc9BTrYo7VuI5fR8tqsy+buA2WFooR5olqvQ==", + "dev": true + }, + "acorn-import-assertions": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz", + "integrity": "sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw==", + "dev": true, + "requires": {} + }, + "acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "requires": {} + }, + "ajv": { + "version": "8.11.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", + "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": 
"^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dev": true, + "requires": { + "ajv": "^8.0.0" + } + }, + "ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.3" + } + }, + "ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true + }, + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "array-includes": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.4.tgz", + "integrity": "sha512-ZTNSQkmWumEbiHO2GF4GmWxYVTiQyJy2XOTa15sdQSrvKn7l+180egQMqlrMOUMCyLMD7pmyQe4mMDUT6Behrw==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1", + "get-intrinsic": "^1.1.1", + "is-string": "^1.0.7" + } + }, + "array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true + }, + "array.prototype.flat": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.5.tgz", + "integrity": "sha512-KaYU+S+ndVqyUnignHftkwc58o3uVU1jzczILJ1tN2YaIZpFIKBiP/x/j97E5MVPsaCloPbqWLB/8qCTVvT2qg==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.0" + } + }, + "array.prototype.flatmap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.2.5.tgz", + "integrity": "sha512-08u6rVyi1Lj7oqWbS9nUxliETrtIROT4XGTA4D/LWGten6E3ocm7cy9SIrmNHOL5XVbVuckUp3X6Xyg8/zpvHA==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.0" + } + }, + "arrify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", + "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", + "dev": true + }, + "astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": 
"sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", + "dev": true + }, + "autoprefixer": { + "version": "10.4.4", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.4.tgz", + "integrity": "sha512-Tm8JxsB286VweiZ5F0anmbyGiNI3v3wGv3mz9W+cxEDYB/6jbnj6GM9H9mK3wIL8ftgl+C07Lcwb8PG5PCCPzA==", + "dev": true, + "requires": { + "browserslist": "^4.20.2", + "caniuse-lite": "^1.0.30001317", + "fraction.js": "^4.2.0", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + "postcss-value-parser": "^4.2.0" + } + }, + "balanced-match": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-2.0.0.tgz", + "integrity": "sha512-1ugUSr8BHXRnK23KfuYS+gVMC3LB8QGH9W1iGtDPsNWoQbgtXSExkBu2aDR4epiGWZOjZsj6lDl/N/AqqTC3UA==", + "dev": true + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + }, + "dependencies": { + "balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + } + } + }, + "browserslist": { + "version": "4.20.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.20.2.tgz", + "integrity": "sha512-CQOBCqp/9pDvDbx3xfMi+86pr4KXIf2FDkTTdeuYw8OxS9t898LA1Khq57gtufFILXpfgsSx5woNgsBgvGjpsA==", + "dev": true, + "requires": { + "caniuse-lite": "^1.0.30001317", + "electron-to-chromium": "^1.4.84", + "escalade": "^3.1.1", + "node-releases": "^2.0.2", + "picocolors": "^1.0.0" + } + }, + "buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true + }, + "call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dev": true, + "requires": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + } + }, + "callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true + }, + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "camelcase-keys": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz", + "integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==", + "dev": true, + "requires": { + "camelcase": "^5.3.1", + "map-obj": "^4.0.0", + "quick-lru": "^4.0.1" + } + }, + "caniuse-lite": { + "version": "1.0.30001322", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001322.tgz", + "integrity": 
"sha512-neRmrmIrCGuMnxGSoh+x7zYtQFFgnSY2jaomjU56sCkTA6JINqQrxutF459JpWcWRajvoyn95sOXq4Pqrnyjew==", + "dev": true + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "chrome-trace-event": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", + "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", "dev": true }, - "@nodelib/fs.walk": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.6.tgz", - "integrity": "sha512-8Broas6vTtW4GIXTAHDoE32hnN2M5ykgCpWGbuXHQ15vEMqr23pB76e/GZcYsZCHALv50ktd24qhEyKr6wBtow==", + "clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dev": true, + "requires": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "dependencies": { + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + }, + "kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true + } + } + }, + "clone-regexp": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clone-regexp/-/clone-regexp-2.2.0.tgz", + "integrity": "sha512-beMpP7BOtTipFuW8hrJvREQ2DrRu3BE7by0ZpibtfBA+qfHYvMGTc2Yb1JMYPKg/JUw0CHYvpg796aNTSW9z7Q==", + "dev": true, + "requires": { + "is-regexp": "^2.0.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "dev": true, "requires": { - "@nodelib/fs.scandir": "2.1.4", - "fastq": "^1.6.0" + "color-name": "1.1.3" } }, - "@trysound/sax": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", - "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", "dev": true }, - "@types/minimist": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.1.tgz", - "integrity": "sha512-fZQQafSREFyuZcdWFAExYjBiCL7AUCdgsk80iO0q4yihYYdcIiH28CcuPTGFgLOCC8RlW49GSQxdHwZP+I7CNg==", + "colord": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.2.tgz", + "integrity": "sha512-Uqbg+J445nc1TKn4FoDPS6ZZqAvEDnwrH42yo8B40JSOgSLxMZ/gt3h4nmCtPLQeXhjJJkqBx7SCY35WnIixaQ==", "dev": true }, - "@types/normalize-package-data": { - "version": "2.4.0", - "resolved": 
"https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.0.tgz", - "integrity": "sha512-f5j5b/Gf71L+dbqxIpQ4Z2WlmI/mPJ0fOkGGmFgtb6sAu97EPczzbS3/tJKxmcYDj55OX6ssqwDAWOHIYDRDGA==", + "colorette": { + "version": "2.0.16", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.16.tgz", + "integrity": "sha512-hUewv7oMjCp+wkBv5Rm0v87eJhq4woh5rSR+42YSQJKecCqgIqNkZ6lAlQms/BwHPJA5NKMRlpxPRv0n8HQW6g==", "dev": true }, - "@types/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", + "commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", "dev": true }, - "ajv": { - "version": "8.9.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.9.0.tgz", - "integrity": "sha512-qOKJyNj/h+OWx7s5DePL6Zu1KeM9jPZhwBqs+7DzP6bGOvqzVCSf0xueYmVuaC/oQ/VtS2zLMLHdQFbkka+XDQ==", - "dev": true, - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", "dev": true }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "cosmiconfig": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.0.1.tgz", + "integrity": "sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ==", "dev": true, "requires": { - "color-convert": "^1.9.0" + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" } }, - "anymatch": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.1.tgz", - "integrity": "sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg==", + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", "dev": true, "requires": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "dependencies": { + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } } }, - "array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": 
"sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true - }, - "arrify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", - "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", - "dev": true - }, - "astral-regex": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", - "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", + "css-functions-list": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/css-functions-list/-/css-functions-list-3.0.1.tgz", + "integrity": "sha512-PriDuifDt4u4rkDgnqRCLnjfMatufLmWNfQnGCq34xZwpY3oabwhB9SqRBmuvWUgndbemCFlKqg+nO7C2q0SBw==", "dev": true }, - "autoprefixer": { - "version": "10.4.2", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.2.tgz", - "integrity": "sha512-9fOPpHKuDW1w/0EKfRmVnxTDt8166MAnLI3mgZ1JCnhNtYWxcJ6Ud5CO/AVOZi/AvFa8DY9RTy3h3+tFBlrrdQ==", + "css-loader": { + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.7.1.tgz", + "integrity": "sha512-yB5CNFa14MbPJcomwNh3wLThtkZgcNyI2bNMRt8iE5Z8Vwl7f8vQXFAzn2HDOJvtDq2NTZBUGMSUNNyrv3/+cw==", "dev": true, "requires": { - "browserslist": "^4.19.1", - "caniuse-lite": "^1.0.30001297", - "fraction.js": "^4.1.2", - "normalize-range": "^0.1.2", - "picocolors": "^1.0.0", - "postcss-value-parser": "^4.2.0" + "icss-utils": "^5.1.0", + "postcss": "^8.4.7", + "postcss-modules-extract-imports": "^3.0.0", + "postcss-modules-local-by-default": "^4.0.0", + "postcss-modules-scope": "^3.0.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.3.5" } }, - "balanced-match": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-2.0.0.tgz", - "integrity": "sha512-1ugUSr8BHXRnK23KfuYS+gVMC3LB8QGH9W1iGtDPsNWoQbgtXSExkBu2aDR4epiGWZOjZsj6lDl/N/AqqTC3UA==", + "cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", "dev": true }, - "binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "dev": true + "debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } }, - "boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=", + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", "dev": true }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "decamelize-keys": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz", + "integrity": 
"sha1-0XGoeTMlKAfrPLYdwcFEXQeN8tk=", "dev": true, "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "decamelize": "^1.1.0", + "map-obj": "^1.0.0" }, "dependencies": { - "balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "map-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", + "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=", "dev": true } } }, - "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "define-properties": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", + "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", "dev": true, "requires": { - "fill-range": "^7.0.1" + "object-keys": "^1.0.12" } }, - "browserslist": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.19.1.tgz", - "integrity": "sha512-u2tbbG5PdKRTUoctO3NBD8FQ5HdPh1ZXPHzp1rwaa5jTc+RV9/+RlWiAIKmjRPQF+xbGM9Kklj5bZQFa2s/38A==", + "dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", "dev": true, "requires": { - "caniuse-lite": "^1.0.30001286", - "electron-to-chromium": "^1.4.17", - "escalade": "^3.1.1", - "node-releases": "^2.0.1", - "picocolors": "^1.0.0" + "path-type": "^4.0.0" } }, - "callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "requires": { + "esutils": "^2.0.2" + } + }, + "electron-to-chromium": { + "version": "1.4.100", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.100.tgz", + "integrity": "sha512-pNrSE2naf8fizl6/Uxq8UbKb8hU9EiYW4OzCYswosXoLV5NTMOUVKECNzDaHiUubsPq/kAckOzZd7zd8S8CHVw==", "dev": true }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "dev": true }, - "camelcase-keys": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz", - "integrity": 
"sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==", + "enhanced-resolve": { + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.9.2.tgz", + "integrity": "sha512-GIm3fQfwLJ8YZx2smuHpBKkXC1yOk+OBEmKckVyL0i/ea8mqDEykK3ld5dgH1QYPNyT/lIllxV2LULnxCHaHkA==", "dev": true, "requires": { - "camelcase": "^5.3.1", - "map-obj": "^4.0.0", - "quick-lru": "^4.0.1" + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" } }, - "caniuse-api": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "enquirer": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz", + "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==", "dev": true, "requires": { - "browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" + "ansi-colors": "^4.1.1" } }, - "caniuse-lite": { - "version": "1.0.30001309", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001309.tgz", - "integrity": "sha512-Pl8vfigmBXXq+/yUz1jUwULeq9xhMJznzdc/xwl4WclDAuebcTHVefpz8lE/bMI+UN7TOkSSe7B7RnZd6+dzjA==", + "envinfo": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz", + "integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw==", "dev": true }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", "dev": true, "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" + "is-arrayish": "^0.2.1" } }, - "chokidar": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", - "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", + "es-abstract": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.19.2.tgz", + "integrity": "sha512-gfSBJoZdlL2xRiOCy0g8gLMryhoe1TlimjzU99L/31Z8QEGIhVQI+EWwt5lT+AuU9SnorVupXFqqOGqGfsyO6w==", "dev": true, "requires": { - "anymatch": "~3.1.1", - "braces": "~3.0.2", - "fsevents": "~2.3.1", - "glob-parent": "~5.1.0", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.5.0" + "call-bind": "^1.0.2", + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "get-intrinsic": "^1.1.1", + "get-symbol-description": "^1.0.0", + "has": "^1.0.3", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.3", + "is-callable": "^1.2.4", + "is-negative-zero": "^2.0.2", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.1", + "is-string": "^1.0.7", + "is-weakref": "^1.0.2", + "object-inspect": "^1.12.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.2", + "string.prototype.trimend": "^1.0.4", + "string.prototype.trimstart": "^1.0.4", + "unbox-primitive": "^1.0.1" + } + }, + "es-module-lexer": { + "version": 
"0.9.3", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-0.9.3.tgz", + "integrity": "sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==", + "dev": true + }, + "es-to-primitive": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", + "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", + "dev": true, + "requires": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" } }, - "cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "eslint": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.18.0.tgz", + "integrity": "sha512-fbgTiE8BfUJZuBeq2Yi7J3RB3WGUQ9PNuNbmgi6jt9Iv8qrkxfy19Ds3OpL1Pm7zg3BtTVhvcUZbIRQ0wmSjAQ==", "dev": true, "requires": { - "string-width": "^4.2.0", + "@babel/code-frame": "^7.0.0", + "@eslint/eslintrc": "^0.3.0", + "ajv": "^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "enquirer": "^2.3.5", + "eslint-scope": "^5.1.1", + "eslint-utils": "^2.1.0", + "eslint-visitor-keys": "^2.0.0", + "espree": "^7.3.1", + "esquery": "^1.2.0", + "esutils": "^2.0.2", + "file-entry-cache": "^6.0.0", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^5.0.0", + "globals": "^12.1.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "js-yaml": "^3.13.1", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash": "^4.17.20", + "minimatch": "^3.0.4", + "natural-compare": "^1.4.0", + "optionator": "^0.9.1", + "progress": "^2.0.0", + "regexpp": "^3.1.0", + "semver": "^7.2.1", "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" + "strip-json-comments": "^3.1.0", + "table": "^6.0.4", + "text-table": "^0.2.0", + "v8-compile-cache": "^2.0.3" + }, + "dependencies": { + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "requires": { + "ansi-styles": 
"^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } } }, - "clone-regexp": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clone-regexp/-/clone-regexp-2.2.0.tgz", - "integrity": "sha512-beMpP7BOtTipFuW8hrJvREQ2DrRu3BE7by0ZpibtfBA+qfHYvMGTc2Yb1JMYPKg/JUw0CHYvpg796aNTSW9z7Q==", + "eslint-config-standard": { + "version": "16.0.3", + "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-16.0.3.tgz", + "integrity": "sha512-x4fmJL5hGqNJKGHSjnLdgA6U6h1YW/G2dW9fA+cyVur4SK6lyue8+UgNKWlZtUDTXvgKDD/Oa3GQjmB5kjtVvg==", "dev": true, - "requires": { - "is-regexp": "^2.0.0" - } + "requires": {} }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "eslint-config-standard-jsx": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-10.0.0.tgz", + "integrity": "sha512-hLeA2f5e06W1xyr/93/QJulN/rLbUVUmqTlexv9PRKHFwEC9ffJcH2LvJhMoEqYQBEYafedgGZXH2W8NUpt5lA==", "dev": true, - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "colord": { - "version": "2.9.2", - "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.2.tgz", - "integrity": "sha512-Uqbg+J445nc1TKn4FoDPS6ZZqAvEDnwrH42yo8B40JSOgSLxMZ/gt3h4nmCtPLQeXhjJJkqBx7SCY35WnIixaQ==", - "dev": true - }, - "commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", - "dev": true - }, - "concat-map": { - 
"version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "dev": true + "requires": {} }, - "cosmiconfig": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.0.1.tgz", - "integrity": "sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ==", + "eslint-import-resolver-node": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.6.tgz", + "integrity": "sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==", "dev": true, "requires": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.2.1", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" + "debug": "^3.2.7", + "resolve": "^1.20.0" + }, + "dependencies": { + "debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + } } }, - "css-declaration-sorter": { - "version": "6.1.4", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.1.4.tgz", - "integrity": "sha512-lpfkqS0fctcmZotJGhnxkIyJWvBXgpyi2wsFd4J8VB7wzyrT6Ch/3Q+FMNJpjK4gu1+GN5khOnpU2ZVKrLbhCw==", + "eslint-module-utils": { + "version": "2.7.3", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.3.tgz", + "integrity": "sha512-088JEC7O3lDZM9xGe0RerkOMd0EjFl+Yvd1jPWIkMT5u3H9+HC34mWWPnqPrN13gieT9pBOO+Qt07Nb/6TresQ==", "dev": true, "requires": { - "timsort": "^0.3.0" + "debug": "^3.2.7", + "find-up": "^2.1.0" + }, + "dependencies": { + "debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true, + "requires": { + "locate-path": "^2.0.0" + } + }, + "locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true, + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true, + "requires": { + "p-limit": "^1.1.0" + } + }, + "p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "dev": true + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + } } }, - "css-functions-list": { + "eslint-plugin-es": { 
"version": "3.0.1", - "resolved": "https://registry.npmjs.org/css-functions-list/-/css-functions-list-3.0.1.tgz", - "integrity": "sha512-PriDuifDt4u4rkDgnqRCLnjfMatufLmWNfQnGCq34xZwpY3oabwhB9SqRBmuvWUgndbemCFlKqg+nO7C2q0SBw==", - "dev": true - }, - "css-select": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.2.1.tgz", - "integrity": "sha512-/aUslKhzkTNCQUB2qTX84lVmfia9NyjP3WpDGtj/WxhwBzWBYUV3DgUpurHTme8UTPcPlAD1DJ+b0nN/t50zDQ==", + "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", + "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", "dev": true, "requires": { - "boolbase": "^1.0.0", - "css-what": "^5.1.0", - "domhandler": "^4.3.0", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" + "eslint-utils": "^2.0.0", + "regexpp": "^3.0.0" } }, - "css-tree": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", - "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "eslint-plugin-import": { + "version": "2.24.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.24.2.tgz", + "integrity": "sha512-hNVtyhiEtZmpsabL4neEj+6M5DCLgpYyG9nzJY8lZQeQXEn5UPW1DpUdsMHMXsq98dbNm7nt1w9ZMSVpfJdi8Q==", "dev": true, "requires": { - "mdn-data": "2.0.14", - "source-map": "^0.6.1" + "array-includes": "^3.1.3", + "array.prototype.flat": "^1.2.4", + "debug": "^2.6.9", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.6", + "eslint-module-utils": "^2.6.2", + "find-up": "^2.0.0", + "has": "^1.0.3", + "is-core-module": "^2.6.0", + "minimatch": "^3.0.4", + "object.values": "^1.1.4", + "pkg-up": "^2.0.0", + "read-pkg-up": "^3.0.0", + "resolve": "^1.20.0", + "tsconfig-paths": "^3.11.0" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "requires": { + "esutils": "^2.0.2" + } + }, + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true, + "requires": { + "locate-path": "^2.0.0" + } + }, + "hosted-git-info": { + "version": "2.8.9", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true + }, + "load-json-file": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", + "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "parse-json": "^4.0.0", + "pify": "^3.0.0", + "strip-bom": "^3.0.0" + } + }, + "locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true, + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "ms": { + "version": 
"2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "requires": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true, + "requires": { + "p-limit": "^1.1.0" + } + }, + "p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "dev": true + }, + "parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", + "dev": true, + "requires": { + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + }, + "path-type": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", + "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", + "dev": true, + "requires": { + "pify": "^3.0.0" + } + }, + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true + }, + "read-pkg": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", + "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=", + "dev": true, + "requires": { + "load-json-file": "^4.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^3.0.0" + } + }, + "read-pkg-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-3.0.0.tgz", + "integrity": "sha1-PtSWaF26D4/hGNBpHcUfSh/5bwc=", + "dev": true, + "requires": { + "find-up": "^2.0.0", + "read-pkg": "^3.0.0" + } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + } } }, - "css-what": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-5.1.0.tgz", - "integrity": "sha512-arSMRWIIFY0hV8pIxZMEfmMI47Wj3R/aWpZDDxWYCPEiOMv6tfOrnpDtgxBYPEQD4V0Y/958+1TdC3iWTFcUPw==", - "dev": true - }, - "cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "dev": true - }, - "cssnano": { - "version": "5.1.0", - "resolved": 
"https://registry.npmjs.org/cssnano/-/cssnano-5.1.0.tgz", - "integrity": "sha512-wWxave1wMlThGg4ueK98jFKaNqXnQd1nVZpSkQ9XvR+YymlzP1ofWqES1JkHtI250LksP9z5JH+oDcrKDJezAg==", + "eslint-plugin-node": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", + "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", "dev": true, "requires": { - "cssnano-preset-default": "^5.2.0", - "lilconfig": "^2.0.3", - "yaml": "^1.10.2" + "eslint-plugin-es": "^3.0.0", + "eslint-utils": "^2.0.0", + "ignore": "^5.1.1", + "minimatch": "^3.0.4", + "resolve": "^1.10.1", + "semver": "^6.1.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } } }, - "cssnano-preset-default": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.0.tgz", - "integrity": "sha512-3N5Vcptj2pqVKpHVqH6ezOJvqikR2PdLTbTrsrhF61FbLRQuujAqZ2sKN5rvcMsb7hFjrNnjZT8CGEkxoN/Pwg==", - "dev": true, - "requires": { - "css-declaration-sorter": "^6.0.3", - "cssnano-utils": "^3.1.0", - "postcss-calc": "^8.2.3", - "postcss-colormin": "^5.3.0", - "postcss-convert-values": "^5.1.0", - "postcss-discard-comments": "^5.1.0", - "postcss-discard-duplicates": "^5.1.0", - "postcss-discard-empty": "^5.1.0", - "postcss-discard-overridden": "^5.1.0", - "postcss-merge-longhand": "^5.1.0", - "postcss-merge-rules": "^5.1.0", - "postcss-minify-font-values": "^5.1.0", - "postcss-minify-gradients": "^5.1.0", - "postcss-minify-params": "^5.1.0", - "postcss-minify-selectors": "^5.2.0", - "postcss-normalize-charset": "^5.1.0", - "postcss-normalize-display-values": "^5.1.0", - "postcss-normalize-positions": "^5.1.0", - "postcss-normalize-repeat-style": "^5.1.0", - "postcss-normalize-string": "^5.1.0", - "postcss-normalize-timing-functions": "^5.1.0", - "postcss-normalize-unicode": "^5.1.0", - "postcss-normalize-url": "^5.1.0", - "postcss-normalize-whitespace": "^5.1.0", - "postcss-ordered-values": "^5.1.0", - "postcss-reduce-initial": "^5.1.0", - "postcss-reduce-transforms": "^5.1.0", - "postcss-svgo": "^5.1.0", - "postcss-unique-selectors": "^5.1.0" - } - }, - "cssnano-utils": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz", - "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", + "eslint-plugin-promise": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-5.1.1.tgz", + "integrity": "sha512-XgdcdyNzHfmlQyweOPTxmc7pIsS6dE4MvwhXWMQ2Dxs1XAL2GJDilUsjWen6TWik0aSI+zD/PqocZBblcm9rdA==", "dev": true, "requires": {} }, - "csso": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", - "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "eslint-plugin-react": { + "version": "7.25.3", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.25.3.tgz", + "integrity": "sha512-ZMbFvZ1WAYSZKY662MBVEWR45VaBT6KSJCiupjrNlcdakB90juaZeDCbJq19e73JZQubqFtgETohwgAt8u5P6w==", "dev": true, "requires": { - "css-tree": "^1.1.2" + "array-includes": "^3.1.3", + "array.prototype.flatmap": "^1.2.4", + "doctrine": "^2.1.0", 
+ "estraverse": "^5.2.0", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.0.4", + "object.entries": "^1.1.4", + "object.fromentries": "^2.0.4", + "object.hasown": "^1.0.0", + "object.values": "^1.1.4", + "prop-types": "^15.7.2", + "resolve": "^2.0.0-next.3", + "string.prototype.matchall": "^4.0.5" + }, + "dependencies": { + "doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "requires": { + "esutils": "^2.0.2" + } + }, + "estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true + }, + "resolve": { + "version": "2.0.0-next.3", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.3.tgz", + "integrity": "sha512-W8LucSynKUIDu9ylraa7ueVZ7hc0uAgJBxVsQSKOXOyle8a93qXhcz+XAXZ8bIq2d6i4Ehddn6Evt+0/UwKk6Q==", + "dev": true, + "requires": { + "is-core-module": "^2.2.0", + "path-parse": "^1.0.6" + } + } } }, - "debug": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", - "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", + "eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", "dev": true, "requires": { - "ms": "2.1.2" + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" } }, - "decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "dev": true - }, - "decamelize-keys": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz", - "integrity": "sha1-0XGoeTMlKAfrPLYdwcFEXQeN8tk=", + "eslint-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz", + "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", "dev": true, "requires": { - "decamelize": "^1.1.0", - "map-obj": "^1.0.0" + "eslint-visitor-keys": "^1.1.0" }, "dependencies": { - "map-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", - "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=", + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", "dev": true } } }, - "dependency-graph": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/dependency-graph/-/dependency-graph-0.11.0.tgz", - "integrity": "sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg==", + "eslint-visitor-keys": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", + "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", "dev": true }, - "dir-glob": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "requires": { - "path-type": "^4.0.0" - } - }, - "dom-serializer": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.2.tgz", - "integrity": "sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig==", + "espree": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz", + "integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==", "dev": true, "requires": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" + "acorn": "^7.4.0", + "acorn-jsx": "^5.3.1", + "eslint-visitor-keys": "^1.3.0" + }, + "dependencies": { + "acorn": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", + "dev": true + }, + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", + "dev": true + } } }, - "domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==", + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", "dev": true }, - "domhandler": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.0.tgz", - "integrity": "sha512-fC0aXNQXqKSFTr2wDNZDhsEYjCiYsDWl3D01kwt25hm1YIPyDGHvvi3rw+PLqHAl/m71MaiF7d5zvBr0p5UB2g==", + "esquery": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz", + "integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==", "dev": true, "requires": { - "domelementtype": "^2.2.0" + "estraverse": "^5.1.0" + }, + "dependencies": { + "estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true + } } }, - "domutils": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", - "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, "requires": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" + "estraverse": "^5.2.0" + }, + "dependencies": { + "estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": 
"sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true + } } }, - "electron-to-chromium": { - "version": "1.4.31", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.31.tgz", - "integrity": "sha512-t3XVQtk+Frkv6aTD4RRk0OqosU+VLe1dQFW83MDer78ZD6a52frgXuYOIsLYTQiH2Lm+JB2OKYcn7zrX+YGAiQ==", + "estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", "dev": true }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", "dev": true }, - "entities": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", - "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", "dev": true }, - "error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", "dev": true, "requires": { - "is-arrayish": "^0.2.1" + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" } }, - "escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "dev": true - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true - }, "execall": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/execall/-/execall-2.0.0.tgz", @@ -3914,8 +7170,65 @@ "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.4" + }, + "dependencies": { + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "requires": { + 
"to-regex-range": "^5.0.1" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true + }, + "micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "requires": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + } } }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true + }, "fastest-levenshtein": { "version": "1.0.12", "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.12.tgz", @@ -3923,9 +7236,9 @@ "dev": true }, "fastq": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.11.0.tgz", - "integrity": "sha512-7Eczs8gIPDrVzT+EksYBcupqMyxSHXXrHOLRRxU2/DicV8789MRBRR8+Hc2uWzUupOs4YS4JzBmBxjjCVBxD/g==", + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz", + "integrity": "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==", "dev": true, "requires": { "reusify": "^1.0.4" @@ -3940,15 +7253,6 @@ "flat-cache": "^3.0.4" } }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "requires": { - "to-regex-range": "^5.0.1" - } - }, "find-up": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", @@ -3970,63 +7274,72 @@ } }, "flatted": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.1.1.tgz", - "integrity": "sha512-zAoAQiudy+r5SvnSw3KJy5os/oRJYHzrzja/tBDqrZtNhUw8bt6y8OBzMWcjWr+8liV8Eb6yOhw8WZ7VFZ5ZzA==", + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.5.tgz", + "integrity": "sha512-WIWGi2L3DyTUvUrwRKgGi9TwxQMUEqPOPQBVi71R96jZXJdFskXEmf54BoZaS1kknGODoIGASGEzBUYdyMCBJg==", "dev": true }, "fraction.js": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.1.2.tgz", - "integrity": "sha512-o2RiJQ6DZaR/5+Si0qJUIy637QMRudSi9kU/FFzx9EZazrIdnBgpU+3sEWCxAVhH2RtxW2Oz+T4p2o8uOPVcgA==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz", + "integrity": "sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==", "dev": true }, - "fs-extra": { - "version": "10.0.0", - "resolved": 
"https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", - "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", "dev": true }, - "fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "dev": true, - "optional": true - }, "function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", "dev": true }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "functional-red-black-tree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", + "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", "dev": true }, + "get-intrinsic": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", + "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", + "dev": true, + "requires": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1" + } + }, "get-stdin": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz", "integrity": "sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==", "dev": true }, + "get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true + }, + "get-symbol-description": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", + "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + } + }, "glob": { - "version": "7.1.7", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", - "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", "dev": true, "requires": { "fs.realpath": "^1.0.0", @@ -4046,52 +7359,41 @@ "is-glob": "^4.0.1" } }, - "global-modules": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", - "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", - "dev": true, - "requires": { - "global-prefix": "^3.0.0" - } + "glob-to-regexp": { + "version": "0.4.1", + "resolved": 
"https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", + "dev": true }, - "global-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", - "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "globals": { + "version": "12.4.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", + "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", "dev": true, "requires": { - "ini": "^1.3.5", - "kind-of": "^6.0.2", - "which": "^1.3.1" + "type-fest": "^0.8.1" + }, + "dependencies": { + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true + } } }, "globby": { - "version": "12.0.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-12.0.2.tgz", - "integrity": "sha512-lAsmb/5Lww4r7MM9nCCliDZVIKbZTavrsunAsHLr9oHthrZP1qi7/gAnHOsUs9bLvEt2vKVJhHmxuL7QbDuPdQ==", + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", "dev": true, "requires": { - "array-union": "^3.0.1", + "array-union": "^2.1.0", "dir-glob": "^3.0.1", - "fast-glob": "^3.2.7", - "ignore": "^5.1.8", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", "merge2": "^1.4.1", - "slash": "^4.0.0" - }, - "dependencies": { - "array-union": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-3.0.1.tgz", - "integrity": "sha512-1OvF9IbWwaeiM9VhzYXVQacMibxpXOMYVNIvMtKRyX9SImBXpKcFr8XvFDeEslCyuH/t6KRt7HEO94AlP8Iatw==", - "dev": true - }, - "slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", - "dev": true - } + "slash": "^3.0.0" } }, "globjoin": { @@ -4101,9 +7403,9 @@ "dev": true }, "graceful-fs": { - "version": "4.2.8", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.8.tgz", - "integrity": "sha512-qkIilPUYcNhJpd33n0GBXTB1MMPp14TxEsEs0pTrsSVucApsYzW5V+Q8Qxhik6KU3evy+qkAAowTByymK0avdg==", + "version": "4.2.9", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", + "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==", "dev": true }, "hard-rejection": { @@ -4121,16 +7423,37 @@ "function-bind": "^1.1.1" } }, + "has-bigints": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", + "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==", + "dev": true + }, "has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", "dev": true }, + "has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "dev": true + }, + "has-tostringtag": { 
+ "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", + "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "dev": true, + "requires": { + "has-symbols": "^1.0.2" + } + }, "hosted-git-info": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.0.2.tgz", - "integrity": "sha512-c9OGXbZ3guC/xOlCg1Ci/VgWlwsqDv1yMQL1CWqXDL0hDjXuNcq0zuR4xqPSuasI3kqFDhqSyTjREz5gzq0fXg==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", + "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==", "dev": true, "requires": { "lru-cache": "^6.0.0" @@ -4142,6 +7465,19 @@ "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==", "dev": true }, + "human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true + }, + "icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "dev": true, + "requires": {} + }, "ignore": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz", @@ -4154,15 +7490,6 @@ "integrity": "sha512-zIE9hX70qew5qTUjSS7wi1iwj/l7+m54KWU247nhM3v806UdGj1yDndXj+IOYxxtW9zyLI+xqFNZjTuDaLUqFw==", "dev": true }, - "import-cwd": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/import-cwd/-/import-cwd-3.0.0.tgz", - "integrity": "sha512-4pnzH16plW+hgvRECbDWpQl3cqtvSofHWh44met7ESfZ8UZOWWddm8hEyDTqREJ9RbYHY8gi8DqmaelApoOGMg==", - "dev": true, - "requires": { - "import-from": "^3.0.0" - } - }, "import-fresh": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", @@ -4171,21 +7498,12 @@ "requires": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" - } - }, - "import-from": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/import-from/-/import-from-3.0.0.tgz", - "integrity": "sha512-CiuXOFFSzkU5x/CR0+z7T91Iht4CXgfCxVOFRhh2Zyhg5wOpWvvDLQUsWl+gcN+QscYBjez8hDCt85O7RLDttQ==", - "dev": true, - "requires": { - "resolve-from": "^5.0.0" }, "dependencies": { "resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true } } @@ -4196,6 +7514,16 @@ "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", "dev": true }, + "import-local": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", + "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", + "dev": true, + "requires": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + } + }, "imurmurhash": { "version": "0.1.4", 
"resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", @@ -4230,30 +7558,66 @@ "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", "dev": true }, + "internal-slot": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz", + "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==", + "dev": true, + "requires": { + "get-intrinsic": "^1.1.0", + "has": "^1.0.3", + "side-channel": "^1.0.4" + } + }, "is-arrayish": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", "dev": true }, - "is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "is-bigint": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", + "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", "dev": true, "requires": { - "binary-extensions": "^2.0.0" + "has-bigints": "^1.0.1" } }, + "is-boolean-object": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", + "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + } + }, + "is-callable": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.4.tgz", + "integrity": "sha512-nsuwtxZfMX67Oryl9LCQ+upnC0Z0BgpwntpS89m1H/TLF0zNfzfLMV/9Wa/6MZsj0acpEjAO0KF1xT6ZdLl95w==", + "dev": true + }, "is-core-module": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.4.0.tgz", - "integrity": "sha512-6A2fkfq1rfeQZjxrZJGerpLCTHRNEBiSgnu0+obeJpEPZRUooHgsizvzv0ZjJwOz3iWIHdJtVWJ/tmPr3D21/A==", + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.8.1.tgz", + "integrity": "sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA==", "dev": true, "requires": { "has": "^1.0.3" } }, + "is-date-object": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", + "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } + }, "is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -4267,18 +7631,33 @@ "dev": true }, "is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, "requires": { "is-extglob": "^2.1.1" } }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": 
"sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "is-negative-zero": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", + "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==", + "dev": true + }, + "is-number-object": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.6.tgz", + "integrity": "sha512-bEVOqiRcvo3zO1+G2lVMy+gkkEm9Yh7cDMRusKKu5ZJKPUYSJwICTKZrNKHA2EbSP0Tu0+6B/emsYNHZyn6K8g==", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } + }, + "is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", "dev": true }, "is-plain-object": { @@ -4287,24 +7666,123 @@ "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", "dev": true }, + "is-regex": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", + "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + } + }, "is-regexp": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-2.1.0.tgz", "integrity": "sha512-OZ4IlER3zmRIoB9AqNhEggVxqIH4ofDns5nRrPS6yQxXE1TPCUpFznBfRQmQa8uC+pXqjMnukiJBxCisIxiLGA==", "dev": true }, + "is-shared-array-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.1.tgz", + "integrity": "sha512-IU0NmyknYZN0rChcKhRO1X8LYz5Isj/Fsqh8NJOSf+N/hCOTwy29F32Ik7a+QszE63IdvmwdTPDd6cZ5pg4cwA==", + "dev": true + }, + "is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true + }, + "is-string": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", + "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } + }, + "is-symbol": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", + "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", + "dev": true, + "requires": { + "has-symbols": "^1.0.2" + } + }, + "is-weakref": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", + "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.2" + } + }, "isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", "dev": true }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "dev": true + }, + "jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": 
"sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dev": true, + "requires": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "dependencies": { + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, "js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", "dev": true }, + "js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", + "dev": true + }, "json-parse-even-better-errors": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", @@ -4317,20 +7795,35 @@ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", "dev": true }, - "jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", + "dev": true + }, + "json5": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", + "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", "dev": true, "requires": { - "graceful-fs": "^4.1.6", - "universalify": "^2.0.0" + "minimist": "^1.2.0" } }, - "kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "jsx-ast-utils": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.2.1.tgz", + "integrity": "sha512-uP5vu8xfy2F9A6LGC22KO7e2/vGTS1MhP+18f++ZNlf0Ohaxbc9nIEwHAsejlJKyzfZzU5UIhe5ItYkitcZnZA==", + "dev": true, + "requires": { + "array-includes": "^3.1.3", + "object.assign": "^4.1.2" + } + }, + "klona": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.5.tgz", + "integrity": "sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ==", 
"dev": true }, "known-css-properties": { @@ -4339,16 +7832,57 @@ "integrity": "sha512-RTSoaUAfLvpR357vWzAz/50Q/BmHfmE6ETSWfutT0AJiw10e6CmcdYRQJlLRd95B53D0Y2aD1jSxD3V3ySF+PA==", "dev": true }, - "lilconfig": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.0.3.tgz", - "integrity": "sha512-EHKqr/+ZvdKCifpNrJCKxBTgk5XupZA3y/aCPY9mxfgBzmgh93Mt/WqjjQ38oMxXuvDokaKiM3lAgvSH2sjtHg==", - "dev": true + "levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "requires": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + } }, "lines-and-columns": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", - "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "load-json-file": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", + "integrity": "sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.15", + "parse-json": "^4.0.0", + "pify": "^4.0.1", + "strip-bom": "^3.0.0", + "type-fest": "^0.3.0" + }, + "dependencies": { + "parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", + "dev": true, + "requires": { + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" + } + }, + "type-fest": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", + "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", + "dev": true + } + } + }, + "loader-runner": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.2.0.tgz", + "integrity": "sha512-92+huvxMvYlMzMt0iIOukcwYBFpkYJdpl2xsZ7LrlayO7E8SOv+JJUEK17B/dJIHAOLMfh2dZZ/Y18WgmGtYNw==", "dev": true }, "locate-path": { @@ -4366,53 +7900,20 @@ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", "dev": true }, - "lodash.difference": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.difference/-/lodash.difference-4.5.0.tgz", - "integrity": "sha1-nMtOUF1Ia5FlE0V3KIWi3yf9AXw=", - "dev": true - }, - "lodash.forown": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.forown/-/lodash.forown-4.4.0.tgz", - "integrity": "sha1-hRFc8E9z75ZuztUlEdOJPMRmg68=", - "dev": true - }, - "lodash.get": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=", - "dev": true - }, - "lodash.groupby": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.groupby/-/lodash.groupby-4.6.0.tgz", - "integrity": "sha1-Cwih3PaDl8OXhVwyOXg4Mt90A9E=", - "dev": true - }, - "lodash.memoize": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=", - "dev": true - }, - 
"lodash.sortby": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", - "integrity": "sha1-7dFMgk4sycHgsKG0K7UhBRakJDg=", - "dev": true - }, "lodash.truncate": { "version": "4.4.2", "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", "integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=", "dev": true }, - "lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=", - "dev": true + "loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dev": true, + "requires": { + "js-tokens": "^3.0.0 || ^4.0.0" + } }, "lru-cache": { "version": "6.0.0", @@ -4424,9 +7925,9 @@ } }, "map-obj": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.2.1.tgz", - "integrity": "sha512-+WA2/1sPmDj1dlvvJmB5G6JKfY9dpn7EVBUL06+y6PoljPkh+6V1QihwxNkbcGxCRjt2b0F9K0taiCuo7MbdFQ==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz", + "integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==", "dev": true }, "mathml-tag-names": { @@ -4435,12 +7936,6 @@ "integrity": "sha512-APMBEanjybaPzUrfqU0IMU5I0AswKMH7k8OTLs0vvV4KZpExkTkY87nR/zpbuTPj+gARop7aGUbl11pnDfW6xg==", "dev": true }, - "mdn-data": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", - "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==", - "dev": true - }, "meow": { "version": "9.0.0", "resolved": "https://registry.npmjs.org/meow/-/meow-9.0.0.tgz", @@ -4461,37 +7956,83 @@ "yargs-parser": "^20.2.3" } }, + "merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, "merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", "dev": true }, - "micromatch": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz", - "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==", + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true + }, + "mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "dev": true, "requires": { - "braces": "^3.0.1", - "picomatch": "^2.2.3" + "mime-db": "1.52.0" } }, + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true + }, "min-indent": { "version": "1.0.1", "resolved": 
"https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", "dev": true }, + "mini-css-extract-plugin": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.6.0.tgz", + "integrity": "sha512-ndG8nxCEnAemsg4FSgS+yNyHKgkTB4nPKqCOgh65j3/30qqC5RaSQQXMm++Y6sb6E1zRSxPkztj9fqxhS1Eo6w==", + "dev": true, + "requires": { + "schema-utils": "^4.0.0" + }, + "dependencies": { + "schema-utils": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", + "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "dev": true, + "requires": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.8.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.0.0" + } + } + } + }, "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, "requires": { "brace-expansion": "^1.1.7" } }, + "minimist": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", + "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", + "dev": true + }, "minimist-options": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz", @@ -4503,10 +8044,10 @@ "kind-of": "^6.0.3" }, "dependencies": { - "is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", + "kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", "dev": true } } @@ -4518,38 +8059,39 @@ "dev": true }, "nanoid": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz", - "integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.2.tgz", + "integrity": "sha512-CuHBogktKwpm5g2sRgv83jEy2ijFzBwMoYA60orPDR7ynsLijJDqgsi4RDGj3OJpy3Ieb+LYwiRmIOGyytgITA==", + "dev": true + }, + "natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", + "dev": true + }, + "neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", "dev": true }, "node-releases": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.1.tgz", - "integrity": "sha512-CqyzN6z7Q6aMeF/ktcMVTzhAHCEpf8SOarwpzpf8pNBY2k5/oM34UHldUwp8VKI7uxct2HxSRdJjBaZeESzcxA==", + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/node-releases/-/node-releases-2.0.2.tgz", + "integrity": "sha512-XxYDdcQ6eKqp/YjI+tb2C5WM2LgjnZrfYg4vgQt49EK268b6gYCHsBLrK2qvJo4FmCtqmKezb0WZFK4fkrZNsg==", "dev": true }, "normalize-package-data": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.2.tgz", - "integrity": "sha512-6CdZocmfGaKnIHPVFhJJZ3GuR8SsLKvDANFp47Jmy51aKIr8akjAWTSxtpI+MBgBFdSMRyo4hMpDlT6dTffgZg==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.3.tgz", + "integrity": "sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==", "dev": true, "requires": { "hosted-git-info": "^4.0.1", - "resolve": "^1.20.0", + "is-core-module": "^2.5.0", "semver": "^7.3.4", "validate-npm-package-license": "^3.0.1" - }, - "dependencies": { - "semver": { - "version": "7.3.5", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", - "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - } } }, "normalize-path": { @@ -4570,19 +8112,86 @@ "integrity": "sha1-0LFF62kRicY6eNIB3E/bEpPvDAM=", "dev": true }, - "normalize-url": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", - "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", + "npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "requires": { + "path-key": "^3.0.0" + } + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", "dev": true }, - "nth-check": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.1.tgz", - "integrity": "sha512-it1vE95zF6dTT9lBsYbxvqh0Soy4SPowchj0UBGj/V6cTPnXXtQOPUbhZ6CmGzAD/rW22LQK6E96pcdJXk4A4w==", + "object-inspect": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.0.tgz", + "integrity": "sha512-Ho2z80bVIvJloH+YzRmpZVQe87+qASmBUKZDWgx9cu+KDrX2ZDH/3tMy+gXbZETVGs2M8YdxObOh7XAtim9Y0g==", + "dev": true + }, + "object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true + }, + "object.assign": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" + } + }, + "object.entries": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.5.tgz", + "integrity": "sha512-TyxmjUoZggd4OrrU1W66FMDG6CuqJxsFvymeyXI51+vQLN67zYfZseptRge703kKQdo4uccgAKebXFcRCzk4+g==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1" + } + }, + 
"object.fromentries": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.5.tgz", + "integrity": "sha512-CAyG5mWQRRiBU57Re4FKoTBjXfDoNwdFVH2Y1tS9PqCsfUTymAohOkEMSG3aRNKmv4lV3O7p1et7c187q6bynw==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1" + } + }, + "object.hasown": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.0.tgz", + "integrity": "sha512-MhjYRfj3GBlhSkDHo6QmvgjRLXQ2zndabdf3nX0yTyZK9rPfxb6uRpAac8HXNLy1GpqWtZ81Qh4v3uOls2sRAg==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1" + } + }, + "object.values": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.5.tgz", + "integrity": "sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==", "dev": true, "requires": { - "boolbase": "^1.0.0" + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1" } }, "once": { @@ -4594,6 +8203,29 @@ "wrappy": "1" } }, + "onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "optionator": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", + "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", + "dev": true, + "requires": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.3" + } + }, "p-limit": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", @@ -4651,6 +8283,12 @@ "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", "dev": true }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true + }, "path-parse": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", @@ -4665,336 +8303,199 @@ }, "picocolors": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", - "dev": true - }, - "picomatch": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", - "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", - "dev": true - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true - }, - "postcss": { - "version": "8.4.7", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.7.tgz", - "integrity": "sha512-L9Ye3r6hkkCeOETQX6iOaWZgjp3LL6Lpqm6EtgbKrgqGGteRMNb9vzBfRL96YOSu8o7x3MfIH9Mo5cPJFGrW6A==", - "dev": true, - "requires": { - "nanoid": "^3.3.1", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" - } - }, - "postcss-calc": { - "version": "8.2.4", - "resolved": 
"https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz", - "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", - "dev": true, - "requires": { - "postcss-selector-parser": "^6.0.9", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-cli": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/postcss-cli/-/postcss-cli-9.1.0.tgz", - "integrity": "sha512-zvDN2ADbWfza42sAnj+O2uUWyL0eRL1V+6giM2vi4SqTR3gTYy8XzcpfwccayF2szcUif0HMmXiEaDv9iEhcpw==", - "dev": true, - "requires": { - "chokidar": "^3.3.0", - "dependency-graph": "^0.11.0", - "fs-extra": "^10.0.0", - "get-stdin": "^9.0.0", - "globby": "^12.0.0", - "picocolors": "^1.0.0", - "postcss-load-config": "^3.0.0", - "postcss-reporter": "^7.0.0", - "pretty-hrtime": "^1.0.3", - "read-cache": "^1.0.0", - "slash": "^4.0.0", - "yargs": "^17.0.0" - }, - "dependencies": { - "get-stdin": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-9.0.0.tgz", - "integrity": "sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA==", - "dev": true - }, - "slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", - "dev": true - } - } - }, - "postcss-colormin": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.0.tgz", - "integrity": "sha512-WdDO4gOFG2Z8n4P8TWBpshnL3JpmNmJwdnfP2gbk2qBA8PWwOYcmjmI/t3CmMeL72a7Hkd+x/Mg9O2/0rD54Pg==", - "dev": true, - "requires": { - "browserslist": "^4.16.6", - "caniuse-api": "^3.0.0", - "colord": "^2.9.1", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-convert-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.0.tgz", - "integrity": "sha512-GkyPbZEYJiWtQB0KZ0X6qusqFHUepguBCNFi9t5JJc7I2OTXG7C0twbTLvCfaKOLl3rSXmpAwV7W5txd91V84g==", - "dev": true, - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-discard-comments": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.0.tgz", - "integrity": "sha512-L0IKF4jAshRyn03SkEO6ar/Ipz2oLywVbg2THf2EqqdNkBwmVMxuTR/RoAltOw4piiaLt3gCAdrbAqmTBInmhg==", - "dev": true, - "requires": {} - }, - "postcss-discard-duplicates": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", - "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", - "dev": true, - "requires": {} - }, - "postcss-discard-empty": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.0.tgz", - "integrity": "sha512-782T/buGgb3HOuHOJAHpdyKzAAKsv/BxWqsutnZ+QsiHEcDkY7v+6WWdturuBiSal6XMOO1p1aJvwXdqLD5vhA==", - "dev": true, - "requires": {} - }, - "postcss-discard-overridden": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", - "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", - "dev": true, - "requires": {} - }, - "postcss-load-config": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-3.1.0.tgz", - "integrity": 
"sha512-ipM8Ds01ZUophjDTQYSVP70slFSYg3T0/zyfII5vzhN6V57YSxMgG5syXuwi5VtS8wSf3iL30v0uBdoIVx4Q0g==", - "dev": true, - "requires": { - "import-cwd": "^3.0.0", - "lilconfig": "^2.0.3", - "yaml": "^1.10.2" - } - }, - "postcss-media-query-parser": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/postcss-media-query-parser/-/postcss-media-query-parser-0.2.3.tgz", - "integrity": "sha1-J7Ocb02U+Bsac7j3Y1HGCeXO8kQ=", - "dev": true - }, - "postcss-merge-longhand": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.0.tgz", - "integrity": "sha512-Gr46srN2tsLD8fudKYoHO56RG0BLQ2nsBRnSZGY04eNBPwTeWa9KeHrbL3tOLAHyB2aliikycPH2TMJG1U+W6g==", - "dev": true, - "requires": { - "postcss-value-parser": "^4.2.0", - "stylehacks": "^5.1.0" - } - }, - "postcss-merge-rules": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.0.tgz", - "integrity": "sha512-NecukEJovQ0mG7h7xV8wbYAkXGTO3MPKnXvuiXzOKcxoOodfTTKYjeo8TMhAswlSkjcPIBlnKbSFcTuVSDaPyQ==", - "dev": true, - "requires": { - "browserslist": "^4.16.6", - "caniuse-api": "^3.0.0", - "cssnano-utils": "^3.1.0", - "postcss-selector-parser": "^6.0.5" - } - }, - "postcss-minify-font-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", - "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", - "dev": true, - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-minify-gradients": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.0.tgz", - "integrity": "sha512-J/TMLklkONn3LuL8wCwfwU8zKC1hpS6VcxFkNUNjmVt53uKqrrykR3ov11mdUYyqVMEx67slMce0tE14cE4DTg==", - "dev": true, - "requires": { - "colord": "^2.9.1", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-minify-params": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.0.tgz", - "integrity": "sha512-q67dcts4Hct6x8+JmhBgctHkbvUsqGIg2IItenjE63iZXMbhjr7AlVZkNnKtIGt/1Wsv7p/7YzeSII6Q+KPXRg==", - "dev": true, - "requires": { - "browserslist": "^4.16.6", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-minify-selectors": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.0.tgz", - "integrity": "sha512-vYxvHkW+iULstA+ctVNx0VoRAR4THQQRkG77o0oa4/mBS0OzGvvzLIvHDv/nNEM0crzN2WIyFU5X7wZhaUK3RA==", - "dev": true, - "requires": { - "postcss-selector-parser": "^6.0.5" - } - }, - "postcss-nested": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-5.0.6.tgz", - "integrity": "sha512-rKqm2Fk0KbA8Vt3AdGN0FB9OBOMDVajMG6ZCf/GoHgdxUJ4sBFp0A/uMIRm+MJUdo33YXEtjqIz8u7DAp8B7DA==", - "dev": true, - "requires": { - "postcss-selector-parser": "^6.0.6" - } - }, - "postcss-normalize-charset": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", - "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", - "dev": true, - "requires": {} + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": 
"sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true }, - "postcss-normalize-display-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", - "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", - "dev": true, - "requires": { - "postcss-value-parser": "^4.2.0" - } + "picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true }, - "postcss-normalize-positions": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.0.tgz", - "integrity": "sha512-8gmItgA4H5xiUxgN/3TVvXRoJxkAWLW6f/KKhdsH03atg0cB8ilXnrB5PpSshwVu/dD2ZsRFQcR1OEmSBDAgcQ==", - "dev": true, - "requires": { - "postcss-value-parser": "^4.2.0" - } + "pify": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "dev": true }, - "postcss-normalize-repeat-style": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.0.tgz", - "integrity": "sha512-IR3uBjc+7mcWGL6CtniKNQ4Rr5fTxwkaDHwMBDGGs1x9IVRkYIT/M4NelZWkAOBdV6v3Z9S46zqaKGlyzHSchw==", + "pkg-conf": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-3.1.0.tgz", + "integrity": "sha512-m0OTbR/5VPNPqO1ph6Fqbj7Hv6QU7gR/tQW40ZqrL1rjgCU85W6C1bJn0BItuJqnR98PWzw7Z8hHeChD1WrgdQ==", "dev": true, "requires": { - "postcss-value-parser": "^4.2.0" + "find-up": "^3.0.0", + "load-json-file": "^5.2.0" + }, + "dependencies": { + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + } } }, - "postcss-normalize-string": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", - "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", + "pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": 
"sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "dev": true, "requires": { - "postcss-value-parser": "^4.2.0" + "find-up": "^4.0.0" } }, - "postcss-normalize-timing-functions": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", - "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", + "pkg-up": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-2.0.0.tgz", + "integrity": "sha1-yBmscoBZpGHKscOImivjxJoATX8=", "dev": true, "requires": { - "postcss-value-parser": "^4.2.0" + "find-up": "^2.1.0" + }, + "dependencies": { + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true, + "requires": { + "locate-path": "^2.0.0" + } + }, + "locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true, + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true, + "requires": { + "p-limit": "^1.1.0" + } + }, + "p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "dev": true + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + } } }, - "postcss-normalize-unicode": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.0.tgz", - "integrity": "sha512-J6M3MizAAZ2dOdSjy2caayJLQT8E8K9XjLce8AUQMwOrCvjCHv24aLC/Lps1R1ylOfol5VIDMaM/Lo9NGlk1SQ==", + "postcss": { + "version": "8.4.12", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.12.tgz", + "integrity": "sha512-lg6eITwYe9v6Hr5CncVbK70SoioNQIq81nsaG86ev5hAidQvmOeETBqs7jm43K2F5/Ley3ytDtriImV6TpNiSg==", "dev": true, "requires": { - "browserslist": "^4.16.6", - "postcss-value-parser": "^4.2.0" + "nanoid": "^3.3.1", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" } }, - "postcss-normalize-url": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", - "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", + "postcss-loader": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-6.2.1.tgz", + "integrity": "sha512-WbbYpmAaKcux/P66bZ40bpWsBucjx/TTgVVzRZ9yUO8yQfVBlameJ0ZGVaPfH64hNSBh63a+ICP5nqOpBA0w+Q==", "dev": true, "requires": { - "normalize-url": "^6.0.1", - "postcss-value-parser": "^4.2.0" + "cosmiconfig": "^7.0.0", + "klona": "^2.0.5", + "semver": "^7.3.5" } }, - "postcss-normalize-whitespace": { - "version": "5.1.0", - "resolved": 
"https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.0.tgz", - "integrity": "sha512-7O1FanKaJkpWFyCghFzIkLhehujV/frGkdofGLwhg5upbLyGsSfiTcZAdSzoPsSUgyPCkBkNMeWR8yVgPdQybg==", - "dev": true, - "requires": { - "postcss-value-parser": "^4.2.0" - } + "postcss-media-query-parser": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/postcss-media-query-parser/-/postcss-media-query-parser-0.2.3.tgz", + "integrity": "sha1-J7Ocb02U+Bsac7j3Y1HGCeXO8kQ=", + "dev": true }, - "postcss-ordered-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.0.tgz", - "integrity": "sha512-wU4Z4D4uOIH+BUKkYid36gGDJNQtkVJT7Twv8qH6UyfttbbJWyw4/xIPuVEkkCtQLAJ0EdsNSh8dlvqkXb49TA==", + "postcss-modules-extract-imports": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz", + "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==", "dev": true, - "requires": { - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - } + "requires": {} }, - "postcss-reduce-initial": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.0.tgz", - "integrity": "sha512-5OgTUviz0aeH6MtBjHfbr57tml13PuedK/Ecg8szzd4XRMbYxH4572JFG067z+FqBIf6Zp/d+0581glkvvWMFw==", + "postcss-modules-local-by-default": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz", + "integrity": "sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ==", "dev": true, "requires": { - "browserslist": "^4.16.6", - "caniuse-api": "^3.0.0" + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" } }, - "postcss-reduce-transforms": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", - "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", + "postcss-modules-scope": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz", + "integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==", "dev": true, "requires": { - "postcss-value-parser": "^4.2.0" + "postcss-selector-parser": "^6.0.4" } }, - "postcss-reporter": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/postcss-reporter/-/postcss-reporter-7.0.4.tgz", - "integrity": "sha512-jY/fnpGSin7kwJeunXbY35STp5O3VIxSFdjee5JkoPQ+FfGH5JW3N+Xe9oAPcL9UkjWjkK+JC72o8XH4XXKdhw==", + "postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", "dev": true, "requires": { - "lodash.difference": "^4.5.0", - "lodash.forown": "^4.4.0", - "lodash.get": "^4.4.2", - "lodash.groupby": "^4.6.0", - "lodash.sortby": "^4.7.0", - "picocolors": "^1.0.0" + "icss-utils": "^5.0.0" } }, "postcss-resolve-nested-selector": { @@ -5010,6 +8511,13 @@ "dev": true, "requires": {} }, + "postcss-scss": { + "version": "4.0.3", + "resolved": 
"https://registry.npmjs.org/postcss-scss/-/postcss-scss-4.0.3.tgz", + "integrity": "sha512-j4KxzWovfdHsyxwl1BxkUal/O4uirvHgdzMKS1aWJBAV0qh2qj5qAZqpeBfVUYGWv+4iK9Az7SPyZ4fyNju1uA==", + "dev": true, + "requires": {} + }, "postcss-selector-parser": { "version": "6.0.9", "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.9.tgz", @@ -5020,37 +8528,35 @@ "util-deprecate": "^1.0.2" } }, - "postcss-svgo": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz", - "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", - "dev": true, - "requires": { - "postcss-value-parser": "^4.2.0", - "svgo": "^2.7.0" - } - }, - "postcss-unique-selectors": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.0.tgz", - "integrity": "sha512-LmUhgGobtpeVJJHuogzjLRwJlN7VH+BL5c9GKMVJSS/ejoyePZkXvNsYUtk//F6vKOGK86gfRS0xH7fXQSDtvA==", - "dev": true, - "requires": { - "postcss-selector-parser": "^6.0.5" - } - }, "postcss-value-parser": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", "dev": true }, - "pretty-hrtime": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz", - "integrity": "sha1-t+PqQkNaTJsnWdmeDyAesZWALuE=", + "prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true + }, + "progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", "dev": true }, + "prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dev": true, + "requires": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, "punycode": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", @@ -5058,9 +8564,9 @@ "dev": true }, "queue-microtask": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.2.tgz", - "integrity": "sha512-dB15eXv3p2jDlbOiNLyMabYg1/sXvppd8DP2J3EOCQ0AkuSXCW2tP7mnVouVLJKgUMY6yP0kcQDVpLCN13h4Xg==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", "dev": true }, "quick-lru": { @@ -5069,15 +8575,21 @@ "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", "dev": true }, - "read-cache": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", - "integrity": "sha1-5mTvMRYRZsl1HNvo28+GtftY93Q=", + "randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": 
"sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", "dev": true, "requires": { - "pify": "^2.3.0" + "safe-buffer": "^5.1.0" } }, + "react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "dev": true + }, "read-pkg": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", @@ -5141,86 +8653,304 @@ } } }, - "readdirp": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", - "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", + "redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "requires": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + } + }, + "regexp.prototype.flags": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.1.tgz", + "integrity": "sha512-pMR7hBVUUGI7PMA37m2ofIdQCsomVnas+Jn5UPGAHQ+/LlwKm/aTLJHdasmHRzlfeZwHiAOaRSo2rbBDm3nNUQ==", "dev": true, "requires": { - "picomatch": "^2.2.1" + "call-bind": "^1.0.2", + "define-properties": "^1.1.3" } }, - "redent": { + "regexpp": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", + "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", + "dev": true + }, + "require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true + }, + "resolve": { + "version": "1.22.0", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.0.tgz", + "integrity": "sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw==", + "dev": true, + "requires": { + "is-core-module": "^2.8.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + } + }, + "resolve-cwd": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", - "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "requires": { + "resolve-from": "^5.0.0" + } + }, + "resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true + }, + "reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true + }, + "rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": 
"sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "requires": { + "glob": "^7.1.3" + } + }, + "run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "requires": { + "queue-microtask": "^1.2.2" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "sass": { + "version": "1.49.10", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.49.10.tgz", + "integrity": "sha512-w37zfWJwKu4I78U4z63u1mmgoncq+v3iOB4yzQMPyAPVHHawaQSnu9C9ysGQnZEhW609jkcLioJcMCqm75JMdg==", + "dev": true, + "requires": { + "chokidar": ">=3.0.0 <4.0.0", + "immutable": "^4.0.0", + "source-map-js": ">=0.6.2 <2.0.0" + }, + "dependencies": { + "anymatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", + "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", + "dev": true, + "requires": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + } + }, + "binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "dev": true + }, + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "chokidar": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "dev": true, + "requires": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "fsevents": "~2.3.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "optional": true + }, + "is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "requires": { + "binary-extensions": "^2.0.0" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": 
true + }, + "readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "requires": { + "picomatch": "^2.2.1" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + } + } + }, + "sass-loader": { + "version": "12.6.0", + "resolved": "https://registry.npmjs.org/sass-loader/-/sass-loader-12.6.0.tgz", + "integrity": "sha512-oLTaH0YCtX4cfnJZxKSLAyglED0naiYfNG1iXfU5w1LNZ+ukoA5DtyDIN5zmKVZwYNJP4KRc5Y3hkWga+7tYfA==", + "dev": true, + "requires": { + "klona": "^2.0.4", + "neo-async": "^2.6.2" + } + }, + "schema-utils": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", + "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", + "dev": true, + "requires": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "dependencies": { + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "dev": true, + "requires": {} + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + } + } + }, + "semver": { + "version": "7.3.5", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", + "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", "dev": true, "requires": { - "indent-string": "^4.0.0", - "strip-indent": "^3.0.0" + "lru-cache": "^6.0.0" } }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "dev": true - }, - "require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", - "dev": true - }, - "resolve": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", - "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", + "serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": 
"sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", "dev": true, "requires": { - "is-core-module": "^2.2.0", - "path-parse": "^1.0.6" + "randombytes": "^2.1.0" } }, - "resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true - }, - "reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "dev": true - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", "dev": true, "requires": { - "glob": "^7.1.3" + "kind-of": "^6.0.2" + }, + "dependencies": { + "kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true + } } }, - "run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, "requires": { - "queue-microtask": "^1.2.2" + "shebang-regex": "^3.0.0" } }, - "sass": { - "version": "1.49.9", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.49.9.tgz", - "integrity": "sha512-YlYWkkHP9fbwaFRZQRXgDi3mXZShslVmmo+FVK3kHLUELHHEYrCmL1x6IUjC7wLS6VuJSAFXRQS/DxdsC4xL1A==", + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true + }, + "side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", "dev": true, "requires": { - "chokidar": ">=3.0.0 <4.0.0", - "immutable": "^4.0.0", - "source-map-js": ">=0.6.2 <2.0.0" + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" } }, "signal-exit": { @@ -5272,18 +9002,30 @@ } } }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - }, "source-map-js": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", "dev": true }, 
+ "source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, "spdx-correct": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", @@ -5311,9 +9053,9 @@ } }, "spdx-license-ids": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.9.tgz", - "integrity": "sha512-Ki212dKK4ogX+xDo4CtOZBVIwhsKBEfsEEcwmJfLQzirgc2jIWdzg40Unxz/HzEUqM1WFzVlQSMF9kZZ2HboLQ==", + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.11.tgz", + "integrity": "sha512-Ctl2BrFiM0X3MANYgj3CkygxhRmr9mi6xhejbdO960nF6EDJApTYpn0BQnDKlnNBULKiCN1n3w9EBkHK8ZWg+g==", "dev": true }, "specificity": { @@ -5322,12 +9064,40 @@ "integrity": "sha512-1klA3Gi5PD1Wv9Q0wUoOQN1IWAuPu0D1U03ThXTr0cJ20+/iq2tHSDnK7Kk/0LXJ1ztUB2/1Os0wKmfyNgUQfg==", "dev": true }, - "stable": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", "dev": true }, + "standard": { + "version": "16.0.4", + "resolved": "https://registry.npmjs.org/standard/-/standard-16.0.4.tgz", + "integrity": "sha512-2AGI874RNClW4xUdM+bg1LRXVlYLzTNEkHmTG5mhyn45OhbgwA+6znowkOGYy+WMb5HRyELvtNy39kcdMQMcYQ==", + "dev": true, + "requires": { + "eslint": "~7.18.0", + "eslint-config-standard": "16.0.3", + "eslint-config-standard-jsx": "10.0.0", + "eslint-plugin-import": "~2.24.2", + "eslint-plugin-node": "~11.1.0", + "eslint-plugin-promise": "~5.1.0", + "eslint-plugin-react": "~7.25.1", + "standard-engine": "^14.0.1" + } + }, + "standard-engine": { + "version": "14.0.1", + "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-14.0.1.tgz", + "integrity": "sha512-7FEzDwmHDOGva7r9ifOzD3BGdTbA7ujJ50afLVdW/tK14zQEptJjbFuUfn50irqdHDcTbNh0DTIoMPynMCXb0Q==", + "dev": true, + "requires": { + "get-stdin": "^8.0.0", + "minimist": "^1.2.5", + "pkg-conf": "^3.1.0", + "xdg-basedir": "^4.0.0" + } + }, "string-width": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", @@ -5339,6 +9109,42 @@ "strip-ansi": "^6.0.1" } }, + "string.prototype.matchall": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.7.tgz", + "integrity": "sha512-f48okCX7JiwVi1NXCVWcFnZgADDC/n2vePlQ/KUCNqCikLLilQvwjMO8+BHVKvgzH0JB0J9LEPgxOGT02RoETg==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1", + "get-intrinsic": "^1.1.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.3", + "regexp.prototype.flags": "^1.4.1", + "side-channel": "^1.0.4" + } + }, + "string.prototype.trimend": { + "version": "1.0.4", + 
"resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz", + "integrity": "sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3" + } + }, + "string.prototype.trimstart": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz", + "integrity": "sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3" + } + }, "strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -5348,6 +9154,18 @@ "ansi-regex": "^5.0.1" } }, + "strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", + "dev": true + }, + "strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true + }, "strip-indent": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", @@ -5357,33 +9175,29 @@ "min-indent": "^1.0.0" } }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true + }, "style-search": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/style-search/-/style-search-0.1.0.tgz", "integrity": "sha1-eVjHk+R+MuB9K1yv5cC/jhLneQI=", "dev": true }, - "stylehacks": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.0.tgz", - "integrity": "sha512-SzLmvHQTrIWfSgljkQCw2++C9+Ne91d/6Sp92I8c5uHTcy/PgeHamwITIbBW9wnFTY/3ZfSXR9HIL6Ikqmcu6Q==", - "dev": true, - "requires": { - "browserslist": "^4.16.6", - "postcss-selector-parser": "^6.0.4" - } - }, "stylelint": { - "version": "14.5.3", - "resolved": "https://registry.npmjs.org/stylelint/-/stylelint-14.5.3.tgz", - "integrity": "sha512-omHETL+kGHR+fCXFK1SkZD/A+emCP9esggAdWEl8GPjTNeyRYj+H6uetRDcU+7E451zwWiUYGVAX+lApsAZgsQ==", + "version": "14.6.1", + "resolved": "https://registry.npmjs.org/stylelint/-/stylelint-14.6.1.tgz", + "integrity": "sha512-FfNdvZUZdzh9KDQxDnO7Opp+prKh8OQVuSW8S13cBtxrooCbm6J6royhUeb++53WPMt04VB+ZbOz/QmzAijs6Q==", "dev": true, "requires": { "balanced-match": "^2.0.0", "colord": "^2.9.2", "cosmiconfig": "^7.0.1", "css-functions-list": "^3.0.1", - "debug": "^4.3.3", + "debug": "^4.3.4", "execall": "^2.0.0", "fast-glob": "^3.2.11", "fastest-levenshtein": "^1.0.12", @@ -5404,7 +9218,7 @@ "normalize-path": "^3.0.0", "normalize-selector": "^0.2.0", "picocolors": "^1.0.0", - "postcss": "^8.4.6", + "postcss": "^8.4.12", "postcss-media-query-parser": "^0.2.3", "postcss-resolve-nested-selector": "^0.1.1", "postcss-safe-parser": "^6.0.0", @@ -5422,48 +9236,118 @@ "write-file-atomic": "^4.0.1" }, "dependencies": { - "globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", 
+ "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", "dev": true, "requires": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" + "fill-range": "^7.0.1" } }, - "resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dev": true, + "requires": { + "global-prefix": "^3.0.0" + } + }, + "global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dev": true, + "requires": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true + }, + "kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", "dev": true + }, + "micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "requires": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } } } }, "stylelint-config-recommended": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-7.0.0.tgz", - "integrity": "sha512-yGn84Bf/q41J4luis1AZ95gj0EQwRX8lWmGmBwkwBNSkpGSpl66XcPTulxGa/Z91aPoNGuIGBmFkcM1MejMo9Q==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-6.0.0.tgz", + "integrity": "sha512-ZorSSdyMcxWpROYUvLEMm0vSZud2uB7tX1hzBZwvVY9SV/uly4AvvJPPhCcymZL3fcQhEQG5AELmrxWqtmzacw==", "dev": true, "requires": {} }, + "stylelint-config-recommended-scss": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/stylelint-config-recommended-scss/-/stylelint-config-recommended-scss-5.0.2.tgz", + "integrity": 
"sha512-b14BSZjcwW0hqbzm9b0S/ScN2+3CO3O4vcMNOw2KGf8lfVSwJ4p5TbNEXKwKl1+0FMtgRXZj6DqVUe/7nGnuBg==", + "dev": true, + "requires": { + "postcss-scss": "^4.0.2", + "stylelint-config-recommended": "^6.0.0", + "stylelint-scss": "^4.0.0" + } + }, "stylelint-config-standard": { - "version": "25.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-25.0.0.tgz", - "integrity": "sha512-21HnP3VSpaT1wFjFvv9VjvOGDtAviv47uTp3uFmzcN+3Lt+RYRv6oAplLaV51Kf792JSxJ6svCJh/G18E9VnCA==", + "version": "24.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-24.0.0.tgz", + "integrity": "sha512-+RtU7fbNT+VlNbdXJvnjc3USNPZRiRVp/d2DxOF/vBDDTi0kH5RX2Ny6errdtZJH3boO+bmqIYEllEmok4jiuw==", + "dev": true, + "requires": { + "stylelint-config-recommended": "^6.0.0" + } + }, + "stylelint-config-standard-scss": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-standard-scss/-/stylelint-config-standard-scss-3.0.0.tgz", + "integrity": "sha512-zt3ZbzIbllN1iCmc94e4pDxqpkzeR6CJo5DDXzltshuXr+82B8ylHyMMARNnUYrZH80B7wgY7UkKTYCFM0UUyw==", "dev": true, "requires": { - "stylelint-config-recommended": "^7.0.0" + "stylelint-config-recommended-scss": "^5.0.2", + "stylelint-config-standard": "^24.0.0" } }, "stylelint-scss": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/stylelint-scss/-/stylelint-scss-4.1.0.tgz", - "integrity": "sha512-BNYTo7MMamhFOlcaAWp2dMpjg6hPyM/FFqfDIYzmYVLMmQJqc8lWRIiTqP4UX5bresj9Vo0dKC6odSh43VP2NA==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylelint-scss/-/stylelint-scss-4.2.0.tgz", + "integrity": "sha512-HHHMVKJJ5RM9pPIbgJ/XA67h9H0407G68Rm69H4fzFbFkyDMcTV1Byep3qdze5+fJ3c0U7mJrbj6S0Fg072uZA==", "dev": true, "requires": { "lodash": "^4.17.21", @@ -5509,27 +9393,18 @@ } } }, + "supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true + }, "svg-tags": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=", "dev": true }, - "svgo": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", - "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", - "dev": true, - "requires": { - "@trysound/sax": "0.2.0", - "commander": "^7.2.0", - "css-select": "^4.1.3", - "css-tree": "^1.1.3", - "csso": "^4.2.0", - "picocolors": "^1.0.0", - "stable": "^0.1.8" - } - }, "table": { "version": "6.8.0", "resolved": "https://registry.npmjs.org/table/-/table-6.8.0.tgz", @@ -5543,38 +9418,109 @@ "strip-ansi": "^6.0.1" } }, - "timsort": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", - "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=", + "tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", "dev": true }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + 
"terser": { + "version": "5.12.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.12.1.tgz", + "integrity": "sha512-NXbs+7nisos5E+yXwAD+y7zrcTkMqb0dEJxIGtSKPdCBzopf7ni4odPul2aechpV7EXNvOudYOX2bb5tln1jbQ==", "dev": true, "requires": { - "is-number": "^7.0.0" + "acorn": "^8.5.0", + "commander": "^2.20.0", + "source-map": "~0.7.2", + "source-map-support": "~0.5.20" + }, + "dependencies": { + "commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true + }, + "source-map": { + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz", + "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==", + "dev": true + } + } + }, + "terser-webpack-plugin": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.1.tgz", + "integrity": "sha512-GvlZdT6wPQKbDNW/GDQzZFg/j4vKU96yl2q6mcUkzKOgW4gwf1Z8cZToUCrz31XHlPWH8MVb1r2tFtdDtTGJ7g==", + "dev": true, + "requires": { + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.0", + "source-map": "^0.6.1", + "terser": "^5.7.2" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } } }, + "text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", + "dev": true + }, "trim-newlines": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz", "integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==", "dev": true }, + "tsconfig-paths": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz", + "integrity": "sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==", + "dev": true, + "requires": { + "@types/json5": "^0.0.29", + "json5": "^1.0.1", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + } + }, + "type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "requires": { + "prelude-ls": "^1.2.1" + } + }, "type-fest": { "version": "0.18.1", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz", "integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==", "dev": true }, - "universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true + "unbox-primitive": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz", + "integrity": "sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==", + "dev": true, + "requires": { + "function-bind": "^1.1.1", + 
"has-bigints": "^1.0.1", + "has-symbols": "^1.0.2", + "which-boxed-primitive": "^1.0.2" + } }, "uri-js": { "version": "4.4.1", @@ -5607,52 +9553,135 @@ "spdx-expression-parse": "^3.0.0" } }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "watchpack": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.1.tgz", + "integrity": "sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA==", "dev": true, "requires": { - "isexe": "^2.0.0" + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" } }, - "wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "webpack": { + "version": "5.70.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.70.0.tgz", + "integrity": "sha512-ZMWWy8CeuTTjCxbeaQI21xSswseF2oNOwc70QSKNePvmxE7XW36i7vpBMYZFAUHPwQiEbNGCEYIOOlyRbdGmxw==", "dev": true, "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^0.0.51", + "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/wasm-edit": "1.11.1", + "@webassemblyjs/wasm-parser": "1.11.1", + "acorn": "^8.4.1", + "acorn-import-assertions": "^1.7.6", + "browserslist": "^4.14.5", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.9.2", + "es-module-lexer": "^0.9.0", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.9", + "json-parse-better-errors": "^1.0.2", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.1.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.1.3", + "watchpack": "^2.3.1", + "webpack-sources": "^3.2.3" + } + }, + "webpack-cli": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-4.9.2.tgz", + "integrity": "sha512-m3/AACnBBzK/kMTcxWHcZFPrw/eQuY4Df1TxvIWfWM2x7mRqBQCqKEd96oCUa9jkapLBaFfRce33eGDb4Pr7YQ==", + "dev": true, + "requires": { + "@discoveryjs/json-ext": "^0.5.0", + "@webpack-cli/configtest": "^1.1.1", + "@webpack-cli/info": "^1.4.1", + "@webpack-cli/serve": "^1.6.1", + "colorette": "^2.0.14", + "commander": "^7.0.0", + "execa": "^5.0.0", + "fastest-levenshtein": "^1.0.12", + "import-local": "^3.0.2", + "interpret": "^2.2.0", + "rechoir": "^0.7.0", + "webpack-merge": "^5.7.3" }, "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } + "interpret": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-2.2.0.tgz", + "integrity": "sha512-Ju0Bz/cEia55xDwUWEa8+olFpCiQoypjnQySseKtmjNrnps3P+xfpUmGr90T7yjlVJmOtybRvPXhKMbHr+fWnw==", + "dev": true }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "rechoir": { + "version": "0.7.1", + "resolved": 
"https://registry.npmjs.org/rechoir/-/rechoir-0.7.1.tgz", + "integrity": "sha512-/njmZ8s1wVeR6pjTZ+0nCnv8SpZNRMT2D1RLOJQESlYFDBvwpTA4KWJpZ+sBJ4+vhjILRcK7JIFdGCdxEAAitg==", "dev": true, "requires": { - "color-name": "~1.1.4" + "resolve": "^1.9.0" } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true } } }, + "webpack-merge": { + "version": "5.8.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.8.0.tgz", + "integrity": "sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==", + "dev": true, + "requires": { + "clone-deep": "^4.0.1", + "wildcard": "^2.0.0" + } + }, + "webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "dev": true + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "which-boxed-primitive": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", + "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", + "dev": true, + "requires": { + "is-bigint": "^1.0.1", + "is-boolean-object": "^1.1.0", + "is-number-object": "^1.0.4", + "is-string": "^1.0.5", + "is-symbol": "^1.0.3" + } + }, + "wildcard": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.0.tgz", + "integrity": "sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==", + "dev": true + }, + "word-wrap": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", + "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "dev": true + }, "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -5669,10 +9698,10 @@ "signal-exit": "^3.0.7" } }, - "y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "xdg-basedir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", + "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", "dev": true }, "yallist": { @@ -5687,33 +9716,10 @@ "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", "dev": true }, - "yargs": { - "version": "17.3.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.3.0.tgz", - "integrity": "sha512-GQl1pWyDoGptFPJx9b9L6kmR33TGusZvXIZUT+BOz9f7X2L94oeAskFYLEg/FkhV06zZPBYLvLZRWeYId29lew==", - "dev": true, - "requires": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.0.0" - }, - "dependencies": 
{ - "yargs-parser": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.0.0.tgz", - "integrity": "sha512-z9kApYUOCwoeZ78rfRYYWdiU/iNL6mwwYlkkZfJoyMR1xps+NEBX5X7XmRpxkZHhXJ6+Ey00IwKxBBSW9FIjyA==", - "dev": true - } - } - }, "yargs-parser": { - "version": "20.2.6", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.6.tgz", - "integrity": "sha512-AP1+fQIWSM/sMiET8fyayjx/J+JmTPt2Mr0FkrgqB4todtfa53sOsrSAcIrJRD5XS20bKUwaDIuMkWKCEiQLKA==", + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", "dev": true } } diff --git a/package.json b/package.json index 01e92b0a..c49109c1 100644 --- a/package.json +++ b/package.json @@ -2,8 +2,10 @@ "name": "django-hijack", "version": "1.0.0", "scripts": { - "build": "postcss hijack/static/hijack/hijack.scss -o \"${BUILD_LIB:-.}/hijack/static/hijack/hijack.min.css\"", - "build:watch": "postcss hijack/static/hijack/hijack.scss -o hijack/static/hijack/hijack.min.css -w", + "build": "webpack --mode=production", + "build:watch": "webpack --mode=production --watch", + "lint": "npm run lint:js && npm run lint:scss", + "lint:js": "standard \"**/*.js\"", "lint:scss": "stylelint \"**/*.scss\"" }, "repository": { @@ -18,13 +20,23 @@ "homepage": "https://github.com/django-hijack/django-hijack#readme", "devDependencies": { "autoprefixer": "^10.4.2", - "cssnano": "^5.1.0", - "postcss": "^8.4.7", - "postcss-cli": "^9.1.0", - "postcss-nested": "^5.0.6", - "sass": "^1.49.9", + "css-loader": "^6.7.1", + "mini-css-extract-plugin": "^2.6.0", + "postcss": "^8.4.12", + "postcss-loader": "^6.2.1", + "sass": "^1.49.10", + "sass-loader": "^12.6.0", + "standard": "^16.0.4", "stylelint": "^14.3.0", - "stylelint-config-standard": "^25.0.0", - "stylelint-scss": "^4.1.0" + "stylelint-config-standard-scss": "^3.0.0", + "stylelint-scss": "^4.1.0", + "webpack": "^5.70.0", + "webpack-cli": "^4.9.2" + }, + "standard": { + "globals": [ + "fetch", + "FormData" + ] } } diff --git a/postcss.config.js b/postcss.config.js index d5adafa5..3784ac50 100644 --- a/postcss.config.js +++ b/postcss.config.js @@ -1,7 +1 @@ -module.exports = { - plugins: { - autoprefixer: {}, - cssnano: {}, - 'postcss-nested': {} - } -} +module.exports = { plugins: ['autoprefixer'] } diff --git a/setup.cfg b/setup.cfg index b6308db8..439aec41 100644 --- a/setup.cfg +++ b/setup.cfg @@ -20,14 +20,10 @@ classifiers = Development Status :: 5 - Production/Stable License :: OSI Approved :: MIT License Framework :: Django - Framework :: Django :: 2.2 - Framework :: Django :: 3.0 - Framework :: Django :: 3.1 Framework :: Django :: 3.2 Framework :: Django :: 4.0 Programming Language :: Python Programming Language :: Python :: 3 - Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 @@ -40,20 +36,20 @@ install_requires = django>=2.2 setup_requires = setuptools_scm - pytest-runner -tests_require = + +[options.extras_require] +test = pytest pytest-cov pytest-django -[aliases] -test = pytest - [tool:pytest] -addopts = --cov=hijack --tb=short +addopts = --cov --tb=short +testpaths = + hijack filterwarnings = error -DJANGO_SETTINGS_MODULE=hijack.tests.test_app.settings +DJANGO_SETTINGS_MODULE = hijack.tests.test_app.settings [coverage:run] source = hijack diff --git a/webpack.config.js b/webpack.config.js new file 
mode 100644 index 00000000..ce71003c --- /dev/null +++ b/webpack.config.js @@ -0,0 +1,30 @@ +const MiniCssExtractPlugin = require('mini-css-extract-plugin') +const path = require('path') + +module.exports = { + entry: { + hijack: [ + './hijack/static/hijack/hijack.js', + './hijack/static/hijack/hijack.scss' + ] + }, + output: { + filename: '[name].min.js', + path: path.resolve(__dirname, 'hijack', 'static', 'hijack'), + clean: false + }, + plugins: [new MiniCssExtractPlugin({ filename: '[name].min.css' })], + module: { + rules: [ + { + test: /\.s[ac]ss$/i, + use: [ + MiniCssExtractPlugin.loader, + 'css-loader', + 'postcss-loader', + 'sass-loader' + ] + } + ] + } +}
piskvorky__gensim-2582
Import KeyedVectors error: cannot import name 'open'

#### Problem description

I want to import KeyedVectors, but I get this error: `ImportError: cannot import name 'open'`

#### Steps/code/corpus to reproduce

- Install the latest Gensim version (gensim-3.8.0)
- Then try to run `from gensim.models import KeyedVectors`
- Afterwards I get the error printed below

```
Traceback (most recent call last):
  File "fastText_gensim_convert.py", line 3, in <module>
    from gensim.models import KeyedVectors
  File "/usr/local/lib/python3.6/dist-packages/gensim/__init__.py", line 5, in <module>
    from gensim import parsing, corpora, matutils, interfaces, models, similarities, summarization, utils # noqa:F401
  File "/usr/local/lib/python3.6/dist-packages/gensim/parsing/__init__.py", line 4, in <module>
    from .preprocessing import (remove_stopwords, strip_punctuation, strip_punctuation2, # noqa:F401
  File "/usr/local/lib/python3.6/dist-packages/gensim/parsing/preprocessing.py", line 42, in <module>
    from gensim import utils
  File "/usr/local/lib/python3.6/dist-packages/gensim/utils.py", line 45, in <module>
    from smart_open import open
ImportError: cannot import name 'open'
```

#### Versions

```
>>> import platform; print(platform.platform())
Linux-4.15.0-55-generic-x86_64-with-Ubuntu-16.04-xenial
>>> import sys; print("Python", sys.version)
Python 3.6.9 (default, Jul 3 2019, 15:36:16) [GCC 5.4.0 20160609]
>>> import numpy; print("NumPy", numpy.__version__)
NumPy 1.15.4
>>> import scipy; print("SciPy", scipy.__version__)
SciPy 1.1.0
>>> import gensim; print("gensim", gensim.__version__)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/usr/local/lib/python3.6/dist-packages/gensim/__init__.py", line 5, in <module>
    from gensim import parsing, corpora, matutils, interfaces, models, similarities, summarization, utils # noqa:F401
  File "/usr/local/lib/python3.6/dist-packages/gensim/parsing/__init__.py", line 4, in <module>
    from .preprocessing import (remove_stopwords, strip_punctuation, strip_punctuation2, # noqa:F401
  File "/usr/local/lib/python3.6/dist-packages/gensim/parsing/preprocessing.py", line 42, in <module>
    from gensim import utils
  File "/usr/local/lib/python3.6/dist-packages/gensim/utils.py", line 45, in <module>
    from smart_open import open
>>> from gensim.models import word2vec;print("FAST_VERSION", word2vec.FAST_VERSION)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/usr/local/lib/python3.6/dist-packages/gensim/__init__.py", line 5, in <module>
    from gensim import parsing, corpora, matutils, interfaces, models, similarities, summarization, utils # noqa:F401
  File "/usr/local/lib/python3.6/dist-packages/gensim/parsing/__init__.py", line 4, in <module>
    from .preprocessing import (remove_stopwords, strip_punctuation, strip_punctuation2, # noqa:F401
  File "/usr/local/lib/python3.6/dist-packages/gensim/parsing/preprocessing.py", line 42, in <module>
    from gensim import utils
  File "/usr/local/lib/python3.6/dist-packages/gensim/utils.py", line 45, in <module>
    from smart_open import open
ImportError: cannot import name 'open'
```
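The traceback bottoms out at `from smart_open import open` in `gensim/utils.py`, an import that only works when the installed smart_open release exports a top-level `open` function (introduced in the 1.8.x line, going by smart_open's changelog), so an outdated smart_open alongside gensim 3.8.0 is the likely trigger. Below is a minimal diagnostic sketch, added here for illustration and not part of the original report; it assumes a pip/setuptools-managed environment.

```python
# Illustrative check only (assumes pip/setuptools are installed).
# gensim 3.8.0's utils.py does `from smart_open import open`, which fails when
# the installed smart_open predates the release that added a top-level `open`.
import pkg_resources
import smart_open

print(pkg_resources.get_distribution('smart_open').version)
print(hasattr(smart_open, 'open'))  # False reproduces the ImportError above

# If `open` is missing, upgrading usually resolves it:
#   pip install --upgrade smart_open
```

If `hasattr(smart_open, 'open')` prints `False`, upgrading smart_open in place (or reinstalling gensim so that a compatible smart_open is pulled in) typically clears the error.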
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2014 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nRun with:\n\nsudo python ./setup.py install\n\"\"\"\n\nimport os\nimport platform\nimport sys\nimport warnings\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.build_ext import build_ext\n\nPY2 = sys.version_info[0] == 2\n\nif sys.version_info[:2] < (2, 7) or ((3, 0) <= sys.version_info[:2] < (3, 5)):\n raise Exception('This version of gensim needs Python 2.7, 3.5 or later.')\n\n# the following code is adapted from tornado's setup.py:\n# https://github.com/tornadoweb/tornado/blob/master/setup.py\n# to support installing without the extension on platforms where\n# no compiler is available.\n\n\nclass custom_build_ext(build_ext):\n \"\"\"Allow C extension building to fail.\n\n The C extension speeds up word2vec and doc2vec training, but is not essential.\n \"\"\"\n\n warning_message = \"\"\"\n********************************************************************\nWARNING: %s could not\nbe compiled. No C extensions are essential for gensim to run,\nalthough they do result in significant speed improvements for some modules.\n%s\n\nHere are some hints for popular operating systems:\n\nIf you are seeing this message on Linux you probably need to\ninstall GCC and/or the Python development package for your\nversion of Python.\n\nDebian and Ubuntu users should issue the following command:\n\n $ sudo apt-get install build-essential python-dev\n\nRedHat, CentOS, and Fedora users should issue the following command:\n\n $ sudo yum install gcc python-devel\n\nIf you are seeing this message on OSX please read the documentation\nhere:\n\nhttp://api.mongodb.org/python/current/installation.html#osx\n********************************************************************\n\"\"\"\n\n def run(self):\n try:\n build_ext.run(self)\n except Exception:\n e = sys.exc_info()[1]\n sys.stdout.write('%s\\n' % str(e))\n warnings.warn(\n self.warning_message +\n \"Extension modules\" +\n \"There was an issue with your platform configuration - see above.\")\n\n def build_extension(self, ext):\n name = ext.name\n try:\n build_ext.build_extension(self, ext)\n except Exception:\n e = sys.exc_info()[1]\n sys.stdout.write('%s\\n' % str(e))\n warnings.warn(\n self.warning_message +\n \"The %s extension module\" % (name,) +\n \"The output above this warning shows how the compilation failed.\")\n\n # the following is needed to be able to add numpy's include dirs... 
without\n # importing numpy directly in this script, before it's actually installed!\n # http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py\n def finalize_options(self):\n build_ext.finalize_options(self)\n # Prevent numpy from thinking it is still in its setup process:\n # https://docs.python.org/2/library/__builtin__.html#module-__builtin__\n if isinstance(__builtins__, dict):\n __builtins__[\"__NUMPY_SETUP__\"] = False\n else:\n __builtins__.__NUMPY_SETUP__ = False\n\n import numpy\n self.include_dirs.append(numpy.get_include())\n\n\nmodel_dir = os.path.join(os.path.dirname(__file__), 'gensim', 'models')\ngensim_dir = os.path.join(os.path.dirname(__file__), 'gensim')\n\ncmdclass = {'build_ext': custom_build_ext}\n\nWHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}\nif WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):\n import wheelhouse_uploader.cmd\n cmdclass.update(vars(wheelhouse_uploader.cmd))\n\n\nLONG_DESCRIPTION = u\"\"\"\n==============================================\ngensim -- Topic Modelling in Python\n==============================================\n\n|Travis|_\n|Wheel|_\n\n.. |Travis| image:: https://img.shields.io/travis/RaRe-Technologies/gensim/develop.svg\n.. |Wheel| image:: https://img.shields.io/pypi/wheel/gensim.svg\n\n.. _Travis: https://travis-ci.org/RaRe-Technologies/gensim\n.. _Downloads: https://pypi.python.org/pypi/gensim\n.. _License: http://radimrehurek.com/gensim/about.html\n.. _Wheel: https://pypi.python.org/pypi/gensim\n\nGensim is a Python library for *topic modelling*, *document indexing* and *similarity retrieval* with large corpora.\nTarget audience is the *natural language processing* (NLP) and *information retrieval* (IR) community.\n\nFeatures\n---------\n\n* All algorithms are **memory-independent** w.r.t. the corpus size (can process input larger than RAM, streamed, out-of-core),\n* **Intuitive interfaces**\n\n * easy to plug in your own input corpus/datastream (trivial streaming API)\n * easy to extend with other Vector Space algorithms (trivial transformation API)\n\n* Efficient multicore implementations of popular algorithms, such as online **Latent Semantic Analysis (LSA/LSI/SVD)**,\n **Latent Dirichlet Allocation (LDA)**, **Random Projections (RP)**, **Hierarchical Dirichlet Process (HDP)** or **word2vec deep learning**.\n* **Distributed computing**: can run *Latent Semantic Analysis* and *Latent Dirichlet Allocation* on a cluster of computers.\n* Extensive `documentation and Jupyter Notebook tutorials <https://github.com/RaRe-Technologies/gensim/#documentation>`_.\n\n\nIf this feature list left you scratching your head, you can first read more about the `Vector\nSpace Model <http://en.wikipedia.org/wiki/Vector_space_model>`_ and `unsupervised\ndocument analysis <http://en.wikipedia.org/wiki/Latent_semantic_indexing>`_ on Wikipedia.\n\nInstallation\n------------\n\nThis software depends on `NumPy and Scipy <http://www.scipy.org/Download>`_, two Python packages for scientific computing.\nYou must have them installed prior to installing `gensim`.\n\nIt is also recommended you install a fast BLAS library before installing NumPy. This is optional, but using an optimized BLAS such as `ATLAS <http://math-atlas.sourceforge.net/>`_ or `OpenBLAS <http://xianyi.github.io/OpenBLAS/>`_ is known to improve performance by as much as an order of magnitude. 
On OS X, NumPy picks up the BLAS that comes with it automatically, so you don't need to do anything special.\n\nThe simple way to install `gensim` is::\n\n pip install -U gensim\n\nOr, if you have instead downloaded and unzipped the `source tar.gz <http://pypi.python.org/pypi/gensim>`_ package,\nyou'd run::\n\n python setup.py test\n python setup.py install\n\n\nFor alternative modes of installation (without root privileges, development\ninstallation, optional install features), see the `install documentation <http://radimrehurek.com/gensim/install.html>`_.\n\nThis version has been tested under Python 2.7, 3.5 and 3.6. Support for Python 2.6, 3.3 and 3.4 was dropped in gensim 1.0.0. Install gensim 0.13.4 if you *must* use Python 2.6, 3.3 or 3.4. Support for Python 2.5 was dropped in gensim 0.10.0; install gensim 0.9.1 if you *must* use Python 2.5). Gensim's github repo is hooked against `Travis CI for automated testing <https://travis-ci.org/RaRe-Technologies/gensim>`_ on every commit push and pull request.\n\nHow come gensim is so fast and memory efficient? Isn't it pure Python, and isn't Python slow and greedy?\n--------------------------------------------------------------------------------------------------------\n\nMany scientific algorithms can be expressed in terms of large matrix operations (see the BLAS note above). Gensim taps into these low-level BLAS libraries, by means of its dependency on NumPy. So while gensim-the-top-level-code is pure Python, it actually executes highly optimized Fortran/C under the hood, including multithreading (if your BLAS is so configured).\n\nMemory-wise, gensim makes heavy use of Python's built-in generators and iterators for streamed data processing. Memory efficiency was one of gensim's `design goals <http://radimrehurek.com/gensim/about.html>`_, and is a central feature of gensim, rather than something bolted on as an afterthought.\n\nDocumentation\n-------------\n* `QuickStart`_\n* `Tutorials`_\n* `Tutorial Videos`_\n* `Official Documentation and Walkthrough`_\n\nCiting gensim\n-------------\n\nWhen `citing gensim in academic papers and theses <https://scholar.google.cz/citations?view_op=view_citation&hl=en&user=9vG_kV0AAAAJ&citation_for_view=9vG_kV0AAAAJ:u-x6o8ySG0sC>`_, please use this BibTeX entry::\n\n @inproceedings{rehurek_lrec,\n title = {{Software Framework for Topic Modelling with Large Corpora}},\n author = {Radim {\\\\v R}eh{\\\\r u}{\\\\v r}ek and Petr Sojka},\n booktitle = {{Proceedings of the LREC 2010 Workshop on New\n Challenges for NLP Frameworks}},\n pages = {45--50},\n year = 2010,\n month = May,\n day = 22,\n publisher = {ELRA},\n address = {Valletta, Malta},\n language={English}\n }\n\n----------------\n\nGensim is open source software released under the `GNU LGPLv2.1 license <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html>`_.\nCopyright (c) 2009-now Radim Rehurek\n\n|Analytics|_\n\n.. |Analytics| image:: https://ga-beacon.appspot.com/UA-24066335-5/your-repo/page-name\n.. _Analytics: https://github.com/igrigorik/ga-beacon\n.. _Official Documentation and Walkthrough: http://radimrehurek.com/gensim/\n.. _Tutorials: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials\n.. _Tutorial Videos: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#videos\n.. 
_QuickStart: https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/gensim%20Quick%20Start.ipynb\n\n\"\"\"\n\n#\n# 1.11.3 is the oldest version of numpy that we support, for historical reasons.\n# 1.16.1 is the last numpy version to support Py2.\n#\n# Similarly, 4.6.4 is the last pytest version to support Py2.\n#\n# https://docs.scipy.org/doc/numpy/release.html\n# https://docs.pytest.org/en/latest/py27-py34-deprecation.html\n#\nif PY2:\n NUMPY_STR = 'numpy >= 1.11.3, <= 1.16.1'\n PYTEST_STR = 'pytest == 4.6.4'\nelse:\n NUMPY_STR = 'numpy >= 1.11.3'\n PYTEST_STR = 'pytest'\n\ndistributed_env = ['Pyro4 >= 4.27']\n\nwin_testenv = [\n PYTEST_STR,\n 'pytest-rerunfailures',\n 'mock',\n 'cython',\n # temporarily remove pyemd to work around appveyor issues\n # 'pyemd',\n 'testfixtures',\n 'Morfessor==2.0.2a4',\n 'python-Levenshtein >= 0.10.2',\n 'visdom >= 0.1.8, != 0.1.8.7',\n]\n\nif sys.version_info[:2] == (2, 7):\n #\n # 0.20.3 is the last version of scikit-learn that supports Py2.\n # Similarly, for version 5.1.1 of tornado. We require tornado indirectly\n # via visdom.\n #\n win_testenv.append('scikit-learn==0.20.3')\n win_testenv.append('tornado==5.1.1')\nelse:\n win_testenv.append('scikit-learn')\n\nlinux_testenv = win_testenv[:]\n\nif sys.version_info < (3, 7):\n linux_testenv.extend([\n 'tensorflow <= 1.3.0',\n 'keras >= 2.0.4, <= 2.1.4',\n 'annoy',\n ])\n\nif (3, 0) < sys.version_info < (3, 7):\n linux_testenv.extend(['nmslib'])\n\next_modules = [\n Extension('gensim.models.word2vec_inner',\n sources=['./gensim/models/word2vec_inner.c'],\n include_dirs=[model_dir]),\n Extension('gensim.models.doc2vec_inner',\n sources=['./gensim/models/doc2vec_inner.c'],\n include_dirs=[model_dir]),\n Extension('gensim.corpora._mmreader',\n sources=['./gensim/corpora/_mmreader.c']),\n Extension('gensim.models.fasttext_inner',\n sources=['./gensim/models/fasttext_inner.c'],\n include_dirs=[model_dir]),\n Extension('gensim.models._utils_any2vec',\n sources=['./gensim/models/_utils_any2vec.c'],\n include_dirs=[model_dir]),\n Extension('gensim._matutils',\n sources=['./gensim/_matutils.c']),\n Extension('gensim.models.nmf_pgd',\n sources=['./gensim/models/nmf_pgd.c'])\n]\n\nif not (os.name == 'nt' and sys.version_info[0] < 3):\n extra_args = []\n system = platform.system()\n\n if system == 'Linux':\n extra_args.append('-std=c++11')\n elif system == 'Darwin':\n extra_args.extend(['-stdlib=libc++', '-std=c++11'])\n\n ext_modules.append(\n Extension('gensim.models.word2vec_corpusfile',\n sources=['./gensim/models/word2vec_corpusfile.cpp'],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args)\n )\n\n ext_modules.append(\n Extension('gensim.models.fasttext_corpusfile',\n sources=['./gensim/models/fasttext_corpusfile.cpp'],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args)\n )\n\n ext_modules.append(\n Extension('gensim.models.doc2vec_corpusfile',\n sources=['./gensim/models/doc2vec_corpusfile.cpp'],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args)\n )\n\nsetup(\n name='gensim',\n version='3.8.0',\n description='Python framework for fast Vector Space Modelling',\n long_description=LONG_DESCRIPTION,\n\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=find_packages(),\n\n author=u'Radim Rehurek',\n author_email='[email protected]',\n\n url='http://radimrehurek.com/gensim',\n download_url='http://pypi.python.org/pypi/gensim',\n \n license='LGPLv2.1',\n\n keywords='Singular Value Decomposition, SVD, 
Latent Semantic Indexing, '\n 'LSA, LSI, Latent Dirichlet Allocation, LDA, '\n 'Hierarchical Dirichlet Process, HDP, Random Projections, '\n 'TFIDF, word2vec',\n\n platforms='any',\n\n zip_safe=False,\n\n classifiers=[ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Text Processing :: Linguistic',\n ],\n\n test_suite=\"gensim.test\",\n setup_requires=[\n NUMPY_STR,\n ],\n install_requires=[\n NUMPY_STR,\n 'scipy >= 0.18.1',\n 'six >= 1.5.0',\n 'smart_open >= 1.7.0',\n ],\n tests_require=linux_testenv,\n extras_require={\n 'distributed': distributed_env,\n 'test-win': win_testenv,\n 'test': linux_testenv,\n 'docs': linux_testenv + distributed_env + ['sphinx', 'sphinxcontrib-napoleon', 'plotly', 'pattern <= 2.6', 'sphinxcontrib.programoutput'],\n },\n\n include_package_data=True,\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2014 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nRun with:\n\nsudo python ./setup.py install\n\"\"\"\n\nimport os\nimport platform\nimport sys\nimport warnings\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.build_ext import build_ext\n\nPY2 = sys.version_info[0] == 2\n\nif sys.version_info[:2] < (2, 7) or ((3, 0) <= sys.version_info[:2] < (3, 5)):\n raise Exception('This version of gensim needs Python 2.7, 3.5 or later.')\n\n# the following code is adapted from tornado's setup.py:\n# https://github.com/tornadoweb/tornado/blob/master/setup.py\n# to support installing without the extension on platforms where\n# no compiler is available.\n\n\nclass custom_build_ext(build_ext):\n \"\"\"Allow C extension building to fail.\n\n The C extension speeds up word2vec and doc2vec training, but is not essential.\n \"\"\"\n\n warning_message = \"\"\"\n********************************************************************\nWARNING: %s could not\nbe compiled. No C extensions are essential for gensim to run,\nalthough they do result in significant speed improvements for some modules.\n%s\n\nHere are some hints for popular operating systems:\n\nIf you are seeing this message on Linux you probably need to\ninstall GCC and/or the Python development package for your\nversion of Python.\n\nDebian and Ubuntu users should issue the following command:\n\n $ sudo apt-get install build-essential python-dev\n\nRedHat, CentOS, and Fedora users should issue the following command:\n\n $ sudo yum install gcc python-devel\n\nIf you are seeing this message on OSX please read the documentation\nhere:\n\nhttp://api.mongodb.org/python/current/installation.html#osx\n********************************************************************\n\"\"\"\n\n def run(self):\n try:\n build_ext.run(self)\n except Exception:\n e = sys.exc_info()[1]\n sys.stdout.write('%s\\n' % str(e))\n warnings.warn(\n self.warning_message +\n \"Extension modules\" +\n \"There was an issue with your platform configuration - see above.\")\n\n def build_extension(self, ext):\n name = ext.name\n try:\n build_ext.build_extension(self, ext)\n except Exception:\n e = sys.exc_info()[1]\n sys.stdout.write('%s\\n' % str(e))\n warnings.warn(\n self.warning_message +\n \"The %s extension module\" % (name,) +\n \"The output above this warning shows how the compilation failed.\")\n\n # the following is needed to be able to add numpy's include dirs... 
without\n # importing numpy directly in this script, before it's actually installed!\n # http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py\n def finalize_options(self):\n build_ext.finalize_options(self)\n # Prevent numpy from thinking it is still in its setup process:\n # https://docs.python.org/2/library/__builtin__.html#module-__builtin__\n if isinstance(__builtins__, dict):\n __builtins__[\"__NUMPY_SETUP__\"] = False\n else:\n __builtins__.__NUMPY_SETUP__ = False\n\n import numpy\n self.include_dirs.append(numpy.get_include())\n\n\nmodel_dir = os.path.join(os.path.dirname(__file__), 'gensim', 'models')\ngensim_dir = os.path.join(os.path.dirname(__file__), 'gensim')\n\ncmdclass = {'build_ext': custom_build_ext}\n\nWHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}\nif WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):\n import wheelhouse_uploader.cmd\n cmdclass.update(vars(wheelhouse_uploader.cmd))\n\n\nLONG_DESCRIPTION = u\"\"\"\n==============================================\ngensim -- Topic Modelling in Python\n==============================================\n\n|Travis|_\n|Wheel|_\n\n.. |Travis| image:: https://img.shields.io/travis/RaRe-Technologies/gensim/develop.svg\n.. |Wheel| image:: https://img.shields.io/pypi/wheel/gensim.svg\n\n.. _Travis: https://travis-ci.org/RaRe-Technologies/gensim\n.. _Downloads: https://pypi.python.org/pypi/gensim\n.. _License: http://radimrehurek.com/gensim/about.html\n.. _Wheel: https://pypi.python.org/pypi/gensim\n\nGensim is a Python library for *topic modelling*, *document indexing* and *similarity retrieval* with large corpora.\nTarget audience is the *natural language processing* (NLP) and *information retrieval* (IR) community.\n\nFeatures\n---------\n\n* All algorithms are **memory-independent** w.r.t. the corpus size (can process input larger than RAM, streamed, out-of-core),\n* **Intuitive interfaces**\n\n * easy to plug in your own input corpus/datastream (trivial streaming API)\n * easy to extend with other Vector Space algorithms (trivial transformation API)\n\n* Efficient multicore implementations of popular algorithms, such as online **Latent Semantic Analysis (LSA/LSI/SVD)**,\n **Latent Dirichlet Allocation (LDA)**, **Random Projections (RP)**, **Hierarchical Dirichlet Process (HDP)** or **word2vec deep learning**.\n* **Distributed computing**: can run *Latent Semantic Analysis* and *Latent Dirichlet Allocation* on a cluster of computers.\n* Extensive `documentation and Jupyter Notebook tutorials <https://github.com/RaRe-Technologies/gensim/#documentation>`_.\n\n\nIf this feature list left you scratching your head, you can first read more about the `Vector\nSpace Model <http://en.wikipedia.org/wiki/Vector_space_model>`_ and `unsupervised\ndocument analysis <http://en.wikipedia.org/wiki/Latent_semantic_indexing>`_ on Wikipedia.\n\nInstallation\n------------\n\nThis software depends on `NumPy and Scipy <http://www.scipy.org/Download>`_, two Python packages for scientific computing.\nYou must have them installed prior to installing `gensim`.\n\nIt is also recommended you install a fast BLAS library before installing NumPy. This is optional, but using an optimized BLAS such as `ATLAS <http://math-atlas.sourceforge.net/>`_ or `OpenBLAS <http://xianyi.github.io/OpenBLAS/>`_ is known to improve performance by as much as an order of magnitude. 
On OS X, NumPy picks up the BLAS that comes with it automatically, so you don't need to do anything special.\n\nThe simple way to install `gensim` is::\n\n pip install -U gensim\n\nOr, if you have instead downloaded and unzipped the `source tar.gz <http://pypi.python.org/pypi/gensim>`_ package,\nyou'd run::\n\n python setup.py test\n python setup.py install\n\n\nFor alternative modes of installation (without root privileges, development\ninstallation, optional install features), see the `install documentation <http://radimrehurek.com/gensim/install.html>`_.\n\nThis version has been tested under Python 2.7, 3.5 and 3.6. Support for Python 2.6, 3.3 and 3.4 was dropped in gensim 1.0.0. Install gensim 0.13.4 if you *must* use Python 2.6, 3.3 or 3.4. Support for Python 2.5 was dropped in gensim 0.10.0; install gensim 0.9.1 if you *must* use Python 2.5). Gensim's github repo is hooked against `Travis CI for automated testing <https://travis-ci.org/RaRe-Technologies/gensim>`_ on every commit push and pull request.\n\nHow come gensim is so fast and memory efficient? Isn't it pure Python, and isn't Python slow and greedy?\n--------------------------------------------------------------------------------------------------------\n\nMany scientific algorithms can be expressed in terms of large matrix operations (see the BLAS note above). Gensim taps into these low-level BLAS libraries, by means of its dependency on NumPy. So while gensim-the-top-level-code is pure Python, it actually executes highly optimized Fortran/C under the hood, including multithreading (if your BLAS is so configured).\n\nMemory-wise, gensim makes heavy use of Python's built-in generators and iterators for streamed data processing. Memory efficiency was one of gensim's `design goals <http://radimrehurek.com/gensim/about.html>`_, and is a central feature of gensim, rather than something bolted on as an afterthought.\n\nDocumentation\n-------------\n* `QuickStart`_\n* `Tutorials`_\n* `Tutorial Videos`_\n* `Official Documentation and Walkthrough`_\n\nCiting gensim\n-------------\n\nWhen `citing gensim in academic papers and theses <https://scholar.google.cz/citations?view_op=view_citation&hl=en&user=9vG_kV0AAAAJ&citation_for_view=9vG_kV0AAAAJ:u-x6o8ySG0sC>`_, please use this BibTeX entry::\n\n @inproceedings{rehurek_lrec,\n title = {{Software Framework for Topic Modelling with Large Corpora}},\n author = {Radim {\\\\v R}eh{\\\\r u}{\\\\v r}ek and Petr Sojka},\n booktitle = {{Proceedings of the LREC 2010 Workshop on New\n Challenges for NLP Frameworks}},\n pages = {45--50},\n year = 2010,\n month = May,\n day = 22,\n publisher = {ELRA},\n address = {Valletta, Malta},\n language={English}\n }\n\n----------------\n\nGensim is open source software released under the `GNU LGPLv2.1 license <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html>`_.\nCopyright (c) 2009-now Radim Rehurek\n\n|Analytics|_\n\n.. |Analytics| image:: https://ga-beacon.appspot.com/UA-24066335-5/your-repo/page-name\n.. _Analytics: https://github.com/igrigorik/ga-beacon\n.. _Official Documentation and Walkthrough: http://radimrehurek.com/gensim/\n.. _Tutorials: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials\n.. _Tutorial Videos: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#videos\n.. 
_QuickStart: https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/gensim%20Quick%20Start.ipynb\n\n\"\"\"\n\n#\n# 1.11.3 is the oldest version of numpy that we support, for historical reasons.\n# 1.16.1 is the last numpy version to support Py2.\n#\n# Similarly, 4.6.4 is the last pytest version to support Py2.\n#\n# https://docs.scipy.org/doc/numpy/release.html\n# https://docs.pytest.org/en/latest/py27-py34-deprecation.html\n#\nif PY2:\n NUMPY_STR = 'numpy >= 1.11.3, <= 1.16.1'\n PYTEST_STR = 'pytest == 4.6.4'\nelse:\n NUMPY_STR = 'numpy >= 1.11.3'\n PYTEST_STR = 'pytest'\n\ndistributed_env = ['Pyro4 >= 4.27']\n\nwin_testenv = [\n PYTEST_STR,\n 'pytest-rerunfailures',\n 'mock',\n 'cython',\n # temporarily remove pyemd to work around appveyor issues\n # 'pyemd',\n 'testfixtures',\n 'Morfessor==2.0.2a4',\n 'python-Levenshtein >= 0.10.2',\n 'visdom >= 0.1.8, != 0.1.8.7',\n]\n\nif sys.version_info[:2] == (2, 7):\n #\n # 0.20.3 is the last version of scikit-learn that supports Py2.\n # Similarly, for version 5.1.1 of tornado. We require tornado indirectly\n # via visdom.\n #\n win_testenv.append('scikit-learn==0.20.3')\n win_testenv.append('tornado==5.1.1')\nelse:\n win_testenv.append('scikit-learn')\n\nlinux_testenv = win_testenv[:]\n\nif sys.version_info < (3, 7):\n linux_testenv.extend([\n 'tensorflow <= 1.3.0',\n 'keras >= 2.0.4, <= 2.1.4',\n 'annoy',\n ])\n\nif (3, 0) < sys.version_info < (3, 7):\n linux_testenv.extend(['nmslib'])\n\next_modules = [\n Extension('gensim.models.word2vec_inner',\n sources=['./gensim/models/word2vec_inner.c'],\n include_dirs=[model_dir]),\n Extension('gensim.models.doc2vec_inner',\n sources=['./gensim/models/doc2vec_inner.c'],\n include_dirs=[model_dir]),\n Extension('gensim.corpora._mmreader',\n sources=['./gensim/corpora/_mmreader.c']),\n Extension('gensim.models.fasttext_inner',\n sources=['./gensim/models/fasttext_inner.c'],\n include_dirs=[model_dir]),\n Extension('gensim.models._utils_any2vec',\n sources=['./gensim/models/_utils_any2vec.c'],\n include_dirs=[model_dir]),\n Extension('gensim._matutils',\n sources=['./gensim/_matutils.c']),\n Extension('gensim.models.nmf_pgd',\n sources=['./gensim/models/nmf_pgd.c'])\n]\n\nif not (os.name == 'nt' and sys.version_info[0] < 3):\n extra_args = []\n system = platform.system()\n\n if system == 'Linux':\n extra_args.append('-std=c++11')\n elif system == 'Darwin':\n extra_args.extend(['-stdlib=libc++', '-std=c++11'])\n\n ext_modules.append(\n Extension('gensim.models.word2vec_corpusfile',\n sources=['./gensim/models/word2vec_corpusfile.cpp'],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args)\n )\n\n ext_modules.append(\n Extension('gensim.models.fasttext_corpusfile',\n sources=['./gensim/models/fasttext_corpusfile.cpp'],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args)\n )\n\n ext_modules.append(\n Extension('gensim.models.doc2vec_corpusfile',\n sources=['./gensim/models/doc2vec_corpusfile.cpp'],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args)\n )\n\nsetup(\n name='gensim',\n version='3.8.0',\n description='Python framework for fast Vector Space Modelling',\n long_description=LONG_DESCRIPTION,\n\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=find_packages(),\n\n author=u'Radim Rehurek',\n author_email='[email protected]',\n\n url='http://radimrehurek.com/gensim',\n download_url='http://pypi.python.org/pypi/gensim',\n \n license='LGPLv2.1',\n\n keywords='Singular Value Decomposition, SVD, 
Latent Semantic Indexing, '\n 'LSA, LSI, Latent Dirichlet Allocation, LDA, '\n 'Hierarchical Dirichlet Process, HDP, Random Projections, '\n 'TFIDF, word2vec',\n\n platforms='any',\n\n zip_safe=False,\n\n classifiers=[ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Text Processing :: Linguistic',\n ],\n\n test_suite=\"gensim.test\",\n setup_requires=[\n NUMPY_STR,\n ],\n install_requires=[\n NUMPY_STR,\n 'scipy >= 0.18.1',\n 'six >= 1.5.0',\n 'smart_open >= 1.8.1',\n ],\n tests_require=linux_testenv,\n extras_require={\n 'distributed': distributed_env,\n 'test-win': win_testenv,\n 'test': linux_testenv,\n 'docs': linux_testenv + distributed_env + ['sphinx', 'sphinxcontrib-napoleon', 'plotly', 'pattern <= 2.6', 'sphinxcontrib.programoutput'],\n },\n\n include_package_data=True,\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py
index 0e1a99218f..96869bdd4a 100644
--- a/setup.py
+++ b/setup.py
@@ -381,7 +381,7 @@ def finalize_options(self):
         NUMPY_STR,
         'scipy >= 0.18.1',
         'six >= 1.5.0',
-        'smart_open >= 1.7.0',
+        'smart_open >= 1.8.1',
     ],
     tests_require=linux_testenv,
     extras_require={
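The change above only raises the smart_open floor declared in setup.py; the reported `ImportError: cannot import name 'open'` appears to come from an older smart_open release, installed alongside gensim 3.8.0, that predates the top-level `open` helper. Below is a minimal pre-flight check a user could run before importing gensim. It is only a sketch, not part of gensim: the 1.8.1 floor is taken from the diff, and using `pkg_resources` for the version lookup assumes setuptools is available in the environment.

```python
# Sketch: fail fast with a clear message when the installed smart_open is too old
# for gensim's `from smart_open import open`. Not part of gensim itself.
import pkg_resources

MIN_SMART_OPEN = "1.8.1"  # minimum taken from the setup.py change above

installed = pkg_resources.get_distribution("smart_open").version
if pkg_resources.parse_version(installed) < pkg_resources.parse_version(MIN_SMART_OPEN):
    raise RuntimeError(
        "smart_open %s is too old; run `pip install -U \"smart_open>=%s\"` and retry."
        % (installed, MIN_SMART_OPEN)
    )

# Import gensim only after the dependency check has passed.
from gensim.models import KeyedVectors  # noqa: E402,F401
```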
pymeasure__pymeasure-909
Check all Channel classes for docstrings

#895 added a property docstring test. It works, however, only for the `Instrument` classes which are publicly available. Channels (and some base instruments), which are not imported in the init files, are not checked. This issue is about collecting all `Instrument` and `Channel` subclasses in order to check them for docstring consistency.
[ { "content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nfrom ..errors import RangeError, RangeException\nfrom .channel import Channel\nfrom .instrument import Instrument\nfrom .resources import list_resources\nfrom .validators import discreteTruncate\n\nfrom . import activetechnologies\nfrom . import advantest\nfrom . import agilent\nfrom . import aja\nfrom . import ametek\nfrom . import ami\nfrom . import anaheimautomation\nfrom . import anapico\nfrom . import andeenhagerling\nfrom . import anritsu\nfrom . import attocube\nfrom . import bkprecision\nfrom . import danfysik\nfrom . import deltaelektronika\nfrom . import edwards\nfrom . import eurotest\nfrom . import fluke\nfrom . import fwbell\nfrom . import hcp\nfrom . import heidenhain\nfrom . import hp\nfrom . import ipgphotonics\nfrom . import keithley\nfrom . import keysight\nfrom . import lakeshore\nfrom . import lecroy\nfrom . import mksinst\nfrom . import newport\nfrom . import ni\nfrom . import oxfordinstruments\nfrom . import parker\nfrom . import pendulum\nfrom . import razorbill\nfrom . import rohdeschwarz\nfrom . import siglenttechnologies\nfrom . import signalrecovery\nfrom . import srs\nfrom . import tcpowerconversion\nfrom . import tektronix\nfrom . import temptronic\nfrom . import texio\nfrom . import thermotron\nfrom . import thorlabs\nfrom . import toptica\nfrom . import velleman\nfrom . import yokogawa\n", "path": "pymeasure/instruments/__init__.py" } ]
[ { "content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nfrom ..errors import RangeError, RangeException\nfrom .channel import Channel\nfrom .instrument import Instrument\nfrom .resources import list_resources\nfrom .validators import discreteTruncate\n\nfrom . import activetechnologies\nfrom . import advantest\nfrom . import agilent\nfrom . import aja\nfrom . import ametek\nfrom . import ami\nfrom . import anaheimautomation\nfrom . import anapico\nfrom . import andeenhagerling\nfrom . import anritsu\nfrom . import attocube\nfrom . import bkprecision\nfrom . import danfysik\nfrom . import deltaelektronika\nfrom . import edwards\nfrom . import eurotest\nfrom . import fluke\nfrom . import fwbell\nfrom . import hcp\nfrom . import heidenhain\nfrom . import hp\nfrom . import ipgphotonics\nfrom . import keithley\nfrom . import keysight\nfrom . import lakeshore\nfrom . import lecroy\nfrom . import mksinst\nfrom . import newport\nfrom . import ni\nfrom . import oxfordinstruments\nfrom . import parker\nfrom . import pendulum\nfrom . import razorbill\nfrom . import rohdeschwarz\nfrom . import siglenttechnologies\nfrom . import signalrecovery\nfrom . import srs\nfrom . import tcpowerconversion\nfrom . import tektronix\nfrom . import teledyne\nfrom . import temptronic\nfrom . import texio\nfrom . import thermotron\nfrom . import thorlabs\nfrom . import toptica\nfrom . import velleman\nfrom . import yokogawa\n", "path": "pymeasure/instruments/__init__.py" } ]
diff --git a/pymeasure/instruments/__init__.py b/pymeasure/instruments/__init__.py index 448bca8219..4dd9b6a422 100644 --- a/pymeasure/instruments/__init__.py +++ b/pymeasure/instruments/__init__.py @@ -67,6 +67,7 @@ from . import srs from . import tcpowerconversion from . import tektronix +from . import teledyne from . import temptronic from . import texio from . import thermotron diff --git a/tests/instruments/test_all_instruments.py b/tests/instruments/test_all_instruments.py index b3c9a12cf1..b93e4000cb 100644 --- a/tests/instruments/test_all_instruments.py +++ b/tests/instruments/test_all_instruments.py @@ -27,39 +27,54 @@ from unittest.mock import MagicMock from pymeasure import instruments -from pymeasure.instruments import Instrument +from pymeasure.instruments import Instrument, Channel # Collect all instruments devices = [] -for manufacturer in dir(instruments): - if manufacturer.startswith("__"): - continue - manu = getattr(instruments, manufacturer) - for dev in dir(manu): +channels = [] + + +def find_devices_in_module(module, devices, channels): + for dev in dir(module): if dev.startswith("__"): continue - d = getattr(manu, dev) + d = getattr(module, dev) try: - b = issubclass(d, Instrument) + i = issubclass(d, Instrument) + c = issubclass(d, Channel) except TypeError: # d is no class continue else: - if b: + if i and d not in devices: devices.append(d) + elif c and d not in channels: + channels.append(d) + + +find_devices_in_module(instruments, devices, channels) # the instruments module itself +for manufacturer in dir(instruments): + if manufacturer.startswith("__"): + continue + manu = getattr(instruments, manufacturer) + find_devices_in_module(manu, devices, channels) # module in instruments package + for module_name in dir(manu): + if module_name.startswith("__"): + continue + module = getattr(manu, module_name) + if type(module).__name__ == "module": + find_devices_in_module(module, devices, channels) # module in manufacturer package # Collect all properties -# TODO add Channel properties as well: How collect all Channel classes? properties = [] -for device in devices: +for device in devices + channels: for property_name in dir(device): prop = getattr(device, property_name) if isinstance(prop, property): properties.append((device, property_name, prop)) - # Instruments unable to accept an Adapter instance. proper_adapters = [] # Instruments with communication in their __init__, which consequently fails. 
@@ -75,20 +90,40 @@ "IBeamSmart", "ANC300Controller", ] +# Channels which are still an Instrument subclass +channel_as_instrument_subclass = [ + "SMU", # agilent/agilent4156 + "VMU", # agilent/agilent4156 + "VSU", # agilent/agilent4156 + "VARX", # agilent/agilent4156 + "VAR1", # agilent/agilent4156 + "VAR2", # agilent/agilent4156 + "VARD", # agilent/agilent4156 +] # Instruments whose property docstrings are not YET in accordance with the style (Get, Set, Control) grandfathered_docstring_instruments = [ "AWG401x_AFG", "AWG401x_AWG", "AdvantestR3767CG", + "AdvantestR624X", + "SMUChannel", # AdvantestR624X "AdvantestR6245", "AdvantestR6246", "Agilent33220A", "Agilent33500", + "Agilent33500Channel", "Agilent33521A", "Agilent34410A", "Agilent34450A", "Agilent34450A", "Agilent4156", + "SMU", # agilent/agilent4156 + "VMU", # agilent/agilent4156 + "VSU", # agilent/agilent4156 + "VARX", # agilent/agilent4156 + "VAR1", # agilent/agilent4156 + "VAR2", # agilent/agilent4156 + "VARD", # agilent/agilent4156 "Agilent8257D", "Agilent8722ES", "Agilent8722ES", @@ -137,6 +172,8 @@ "LakeShore421", "LakeShore425", "LakeShore425", + "LakeShoreTemperatureChannel", + "LakeShoreHeaterChannel", "LeCroyT3DSO1204", "MKS937B", "IPS120_10", @@ -153,6 +190,8 @@ "SFM", "SPD1168X", "SPD1305X", + "SPDSingleChannelBase", + "SPDBase", "DSP7265", "DSP7265", "DSP7265", @@ -175,16 +214,25 @@ "VellemanK8090", "Yokogawa7651", "YokogawaGS200", + "ScopeChannel", + "IonGaugeAndPressureChannel", + "PressureChannel", + "SequenceEntry", + "ChannelBase", + "ChannelAWG", + "ChannelAFG", ] @pytest.mark.parametrize("cls", devices) def test_adapter_arg(cls): - "Test that every instrument has adapter as their input argument" + "Test that every instrument has adapter as their input argument." if cls.__name__ in proper_adapters: pytest.skip(f"{cls.__name__} does not accept an Adapter instance.") elif cls.__name__ in need_init_communication: pytest.skip(f"{cls.__name__} requires communication in init.") + elif cls.__name__ in channel_as_instrument_subclass: + pytest.skip(f"{cls.__name__} is a channel, not an instrument.") elif cls.__name__ == "Instrument": pytest.skip("`Instrument` requires a `name` parameter.") cls(adapter=MagicMock()) @@ -192,9 +240,11 @@ def test_adapter_arg(cls): @pytest.mark.parametrize("cls", devices) def test_name_argument(cls): - "Test that every instrument accepts a name argument" + "Test that every instrument accepts a name argument." 
if cls.__name__ in (*proper_adapters, *need_init_communication): pytest.skip(f"{cls.__name__} cannot be tested without communication.") + elif cls.__name__ in channel_as_instrument_subclass: + pytest.skip(f"{cls.__name__} is a channel, not an instrument.") inst = cls(adapter=MagicMock(), name="Name_Test") assert inst.name == "Name_Test" @@ -211,6 +261,8 @@ def test_kwargs_to_adapter(cls): """Verify that kwargs are accepted and handed to the adapter.""" if cls.__name__ in (*proper_adapters, *need_init_communication): pytest.skip(f"{cls.__name__} cannot be tested without communication.") + elif cls.__name__ in channel_as_instrument_subclass: + pytest.skip(f"{cls.__name__} is a channel, not an instrument.") elif cls.__name__ == "Instrument": pytest.skip("`Instrument` requires a `name` parameter.") @@ -232,6 +284,6 @@ def test_property_docstrings(prop_set): pytest.skip(f"{device.__name__} is in the codebase and has to be refactored later on.") start = prop.__doc__.split(maxsplit=1)[0] assert start in ("Control", "Measure", "Set", "Get"), ( - f"'{device.__name__}.{property_name}' docstrings does start with '{start}', not 'Control', " + f"'{device.__name__}.{property_name}' docstring does start with '{start}', not 'Control', " "'Measure', 'Get', or 'Set'." )
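The new test helper above walks `dir()` of each manufacturer package to find `Instrument` and `Channel` subclasses. For comparison only, the sketch below gathers the same kind of class list by importing every submodule with `pkgutil` and asking the base classes for their subclasses; it is an illustrative alternative, not what the pymeasure test suite actually uses.

```python
# Illustrative alternative (not the approach taken in the test suite above):
# import every module under pymeasure.instruments, then collect subclasses directly.
import importlib
import pkgutil

import pymeasure.instruments
from pymeasure.instruments import Channel, Instrument


def walk_subclasses(cls):
    """Yield every direct and transitive subclass of cls."""
    for sub in cls.__subclasses__():
        yield sub
        yield from walk_subclasses(sub)


# Importing each submodule registers its classes with __subclasses__().
for info in pkgutil.walk_packages(pymeasure.instruments.__path__,
                                  prefix="pymeasure.instruments."):
    try:
        importlib.import_module(info.name)
    except ImportError:
        pass  # skip modules whose optional dependencies are missing

devices = sorted(set(walk_subclasses(Instrument)), key=lambda c: c.__name__)
channels = sorted(set(walk_subclasses(Channel)), key=lambda c: c.__name__)
print(len(devices), "instrument classes,", len(channels), "channel classes")
```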
nvaccess__nvda-10921
VS Code: Reduce the number of times one has to use NVDA+Space to switch modes.

I just filed [this issue](https://github.com/microsoft/vscode/issues/93087) against VS Code where I suggest using targeted role="document" in those places that produce HTML output for consumption, to make NVDA switch in and out of browse mode in a smart, automated manner, reducing the number of times one has to use NVDA+Space to toggle modes. Examples I found while using the 1.44 VS Code Insider builds were:

* The Welcome page
* The details page for an extension
* The ReadMe file that may be displayed after an extension has been installed.

@leonardder suggested that, once this lands in stable, a modification might be needed for the VS Code app module. So filing this issue here.
[ { "content": "#appModules/code.py\n#A part of NonVisual Desktop Access (NVDA)\n#Copyright (C) 2019 NV Access Limited, Babbage B.V.\n#This file is covered by the GNU General Public License.\n#See the file COPYING for more details.\n\nimport appModuleHandler\n\nclass AppModule(appModuleHandler.AppModule):\n\tdisableBrowseModeByDefault = True\n", "path": "source/appModules/code.py" } ]
[ { "content": null, "path": "source/appModules/code.py" } ]
diff --git a/source/appModules/code.py b/source/appModules/code.py
deleted file mode 100644
index a85d0b49b9f..00000000000
--- a/source/appModules/code.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#appModules/code.py
-#A part of NonVisual Desktop Access (NVDA)
-#Copyright (C) 2019 NV Access Limited, Babbage B.V.
-#This file is covered by the GNU General Public License.
-#See the file COPYING for more details.
-
-import appModuleHandler
-
-class AppModule(appModuleHandler.AppModule):
-	disableBrowseModeByDefault = True
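The fix here is simply to delete the VS Code app module, so NVDA's default behaviour applies and the targeted role="document" regions requested in the linked VS Code issue can drive mode switching automatically. If someone preferred the old behaviour, a user-level app module with the same one-line override could in principle restore it; the sketch below only mirrors the deleted file, makes sense only inside an NVDA user configuration, and is not shipped with NVDA.

```python
# Sketch of a user-provided appModules/code.py that would restore the previous
# behaviour (focus mode by default in VS Code). Mirrors the file deleted above.
import appModuleHandler


class AppModule(appModuleHandler.AppModule):
    # Prevent browse mode from being enabled by default for this application.
    disableBrowseModeByDefault = True
```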
getsentry__sentry-52329
fix(django): Disable admin on prod

Reported here: https://forum.sentry.io/t/sentry-django-admin-portal/12787?u=byk
[ { "content": "from __future__ import annotations\n\nfrom django.conf import settings\nfrom django.urls import URLPattern, URLResolver, re_path\n\nfrom sentry.web.frontend import csrf_failure\nfrom sentry.web.frontend.error_404 import Error404View\nfrom sentry.web.frontend.error_500 import Error500View\nfrom sentry.web.urls import urlpatterns as web_urlpatterns\n\nhandler404 = Error404View.as_view()\nhandler500 = Error500View.as_view()\n\nurlpatterns: list[URLResolver | URLPattern] = [\n re_path(\n r\"^500/\",\n handler500,\n name=\"error-500\",\n ),\n re_path(\n r\"^404/\",\n handler404,\n name=\"error-404\",\n ),\n re_path(\n r\"^403-csrf-failure/\",\n csrf_failure.view,\n name=\"error-403-csrf-failure\",\n ),\n]\n\nif \"django.contrib.admin\" in settings.INSTALLED_APPS:\n from sentry import django_admin\n\n urlpatterns += django_admin.urlpatterns\n\nurlpatterns += web_urlpatterns\n", "path": "src/sentry/conf/urls.py" } ]
[ { "content": "from __future__ import annotations\n\nfrom django.conf import settings\nfrom django.urls import URLPattern, URLResolver, re_path\n\nfrom sentry.web.frontend import csrf_failure\nfrom sentry.web.frontend.error_404 import Error404View\nfrom sentry.web.frontend.error_500 import Error500View\nfrom sentry.web.urls import urlpatterns as web_urlpatterns\n\nhandler404 = Error404View.as_view()\nhandler500 = Error500View.as_view()\n\nurlpatterns: list[URLResolver | URLPattern] = [\n re_path(\n r\"^500/\",\n handler500,\n name=\"error-500\",\n ),\n re_path(\n r\"^404/\",\n handler404,\n name=\"error-404\",\n ),\n re_path(\n r\"^403-csrf-failure/\",\n csrf_failure.view,\n name=\"error-403-csrf-failure\",\n ),\n]\n\nif \"django.contrib.admin\" in settings.INSTALLED_APPS and settings.ADMIN_ENABLED:\n from sentry import django_admin\n\n urlpatterns += django_admin.urlpatterns\n\nurlpatterns += web_urlpatterns\n", "path": "src/sentry/conf/urls.py" } ]
diff --git a/src/sentry/conf/urls.py b/src/sentry/conf/urls.py
index 15d3c3af373784..6053b9ec144d7a 100644
--- a/src/sentry/conf/urls.py
+++ b/src/sentry/conf/urls.py
@@ -29,7 +29,7 @@
     ),
 ]
 
-if "django.contrib.admin" in settings.INSTALLED_APPS:
+if "django.contrib.admin" in settings.INSTALLED_APPS and settings.ADMIN_ENABLED:
     from sentry import django_admin
 
     urlpatterns += django_admin.urlpatterns
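The patch gates the admin URL patterns on `settings.ADMIN_ENABLED` in addition to the app being installed. The same pattern in a plain Django project could look like the sketch below; the `ADMIN_ENABLED` flag name is taken from the diff, while the surrounding layout and the `getattr` default are illustrative assumptions, not Sentry's actual code.

```python
# Generic Django urls.py sketch of the same pattern: only mount the admin
# when an explicit settings flag allows it.
from django.conf import settings
from django.urls import path

urlpatterns = [
    # ... regular project routes ...
]

# getattr() keeps the check safe if the flag is missing from settings;
# deployments then have to opt in explicitly (e.g. ADMIN_ENABLED = DEBUG).
if "django.contrib.admin" in settings.INSTALLED_APPS and getattr(settings, "ADMIN_ENABLED", False):
    from django.contrib import admin

    urlpatterns.append(path("admin/", admin.site.urls))
```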
Pyomo__pyomo-1272
validate does not raise Exception

When `AbstractModel.create_instance` is called, if the `validate` function of a Parameter returns `False`, the value is not assigned but no Exception is raised. Example:

```python
from pyomo import environ as pe

m = pe.AbstractModel()
m.p = pe.Param(validate = lambda m,v: False)
data = {None:{ 'p': {None: 0.2}}}
cm = m.create_instance(data=data)
```

does not raise any error. When one tries to retrieve the value though, `ValueError` is raised because the value has not been set.

```python
>>> cm.p.value
ValueError: Error retrieving immutable Param value (p): The Param value is undefined and no default value is specified.
```

There might be reasons not to raise an exception, but failing to set a value silently can create a lot of confusion. Besides, the [docs](https://pyomo.readthedocs.io/en/latest/pyomo_modeling_components/Parameters.html?#parameters) clearly say:

> If a value is provided that is less than that, the model instantiation would be terminated and an error message issued

Imho, validation is a great feature but I understand it introduces a lot of challenges. If the functionality does not behave as expected, it should at least be (temporarily, hopefully) deprecated, or issue a warning.
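Until the reported behaviour is changed upstream, one way to avoid the silent failure is to have the validator raise instead of returning `False`, so a rejected value aborts `create_instance` with a visible error. The sketch below reuses the reporter's model; the raising validator and the 0.5 threshold are an illustrative workaround, not the eventual Pyomo fix.

```python
# Workaround sketch: raise inside the validator so a rejected value fails loudly
# during create_instance instead of silently leaving the Param unset.
from pyomo import environ as pe


def validate_p(model, value):
    if value <= 0.5:  # stand-in threshold; the report's rule simply returned False
        raise ValueError("value %r rejected for Param p" % (value,))
    return True


m = pe.AbstractModel()
m.p = pe.Param(validate=validate_p)

data = {None: {'p': {None: 0.2}}}
cm = m.create_instance(data=data)  # raises ValueError here instead of failing silently
```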
[ { "content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n__all__ = ['Param']\n\nimport sys\nimport types\nimport logging\nfrom weakref import ref as weakref_ref\n\nfrom pyomo.common.modeling import NoArgumentGiven\nfrom pyomo.common.timing import ConstructionTimer\nfrom pyomo.core.base.plugin import ModelComponentFactory\nfrom pyomo.core.base.component import ComponentData\nfrom pyomo.core.base.indexed_component import IndexedComponent, \\\n UnindexedComponent_set\nfrom pyomo.core.base.misc import apply_indexed_rule, apply_parameterized_indexed_rule\nfrom pyomo.core.base.numvalue import NumericValue, native_types, value\nfrom pyomo.core.base.set_types import Any\n\nfrom six import iteritems, iterkeys, next, itervalues\n\nlogger = logging.getLogger('pyomo.core')\n\ndef _raise_modifying_immutable_error(obj, index):\n if obj.is_indexed():\n name = \"%s[%s]\" % (obj.name, index)\n else:\n name = obj.name\n raise TypeError(\n \"Attempting to set the value of the immutable parameter \"\n \"%s after the parameter has been constructed. If you intend \"\n \"to change the value of this parameter dynamically, please \"\n \"declare the parameter as mutable [i.e., Param(mutable=True)]\"\n % (name,))\n\n\nclass _NotValid(object):\n \"\"\"A dummy type that is pickle-safe that we can use as the default\n value for Params to indicate that no valid value is present.\"\"\"\n pass\n\n\nclass _ParamData(ComponentData, NumericValue):\n \"\"\"\n This class defines the data for a mutable parameter.\n\n Constructor Arguments:\n owner The Param object that owns this data.\n value The value of this parameter.\n\n Public Class Attributes:\n value The numeric value of this variable.\n \"\"\"\n\n __slots__ = ('_value',)\n\n def __init__(self, component):\n #\n # The following is equivalent to calling\n # the base ComponentData constructor.\n #\n self._component = weakref_ref(component)\n #\n # The following is equivalent to calling the\n # base NumericValue constructor.\n #\n self._value = _NotValid\n\n def __getstate__(self):\n \"\"\"\n This method must be defined because this class uses slots.\n \"\"\"\n state = super(_ParamData, self).__getstate__()\n for i in _ParamData.__slots__:\n state[i] = getattr(self, i)\n return state\n\n # Note: because NONE of the slots on this class need to be edited,\n # we don't need to implement a specialized __setstate__ method.\n\n def clear(self):\n \"\"\"Clear the data in this component\"\"\"\n self._value = _NotValid\n\n # FIXME: ComponentData need to have pointers to their index to make\n # operations like validation efficient. 
As it stands now, if\n # set_value is called without specifying an index, this call\n # involves a linear scan of the _data dict.\n def set_value(self, value, idx=NoArgumentGiven):\n self._value = value\n if idx is NoArgumentGiven:\n idx = self.index()\n self.parent_component()._validate_value(idx, value)\n\n def __call__(self, exception=True):\n \"\"\"\n Return the value of this object.\n \"\"\"\n if self._value is _NotValid:\n if exception:\n raise ValueError(\n \"Error evaluating Param value (%s):\\n\\tThe Param value is \"\n \"currently set to an invalid value. This is\\n\\ttypically \"\n \"from a scalar Param or mutable Indexed Param without\\n\"\n \"\\tan initial or default value.\"\n % ( self.name, ))\n else:\n return None\n return self._value\n\n @property\n def value(self):\n \"\"\"Return the value for this variable.\"\"\"\n return self()\n @value.setter\n def value(self, val):\n \"\"\"Set the value for this variable.\"\"\"\n self.set_value(val)\n\n\n def is_fixed(self):\n \"\"\"\n Returns True because this value is fixed.\n \"\"\"\n return True\n\n def is_constant(self):\n \"\"\"\n Returns False because this is not a constant in an expression.\n \"\"\"\n return False\n\n def is_parameter_type(self):\n \"\"\"\n Returns True because this is a parameter object.\n \"\"\"\n return True\n\n def is_variable_type(self):\n \"\"\"\n Returns False because this is not a variable object.\n \"\"\"\n return False\n\n def is_expression_type(self):\n \"\"\"Returns False because this is not an expression\"\"\"\n return False\n\n def is_potentially_variable(self):\n \"\"\"\n Returns False because this object can never reference variables.\n \"\"\"\n return False\n\n def _compute_polynomial_degree(self, result):\n \"\"\"\n Returns 0 because this object can never reference variables.\n \"\"\"\n return 0\n\n def __nonzero__(self):\n \"\"\"Return True if the value is defined and non-zero.\"\"\"\n return bool(self())\n\n __bool__ = __nonzero__\n\n\[email protected](\"Parameter data that is used to define a model instance.\")\nclass Param(IndexedComponent):\n \"\"\"\n A parameter value, which may be defined over an index.\n\n Constructor Arguments:\n name \n The name of this parameter\n index \n The index set that defines the distinct parameters. By default, \n this is None, indicating that there is a single parameter.\n domain \n A set that defines the type of values that each parameter must be.\n within \n A set that defines the type of values that each parameter must be.\n validate \n A rule for validating this parameter w.r.t. 
data that exists in \n the model\n default \n A scalar, rule, or dictionary that defines default values for \n this parameter\n initialize \n A dictionary or rule for setting up this parameter with existing \n model data\n \"\"\"\n\n DefaultMutable = False\n\n def __new__(cls, *args, **kwds):\n if cls != Param:\n return super(Param, cls).__new__(cls)\n if not args or (args[0] is UnindexedComponent_set and len(args)==1):\n return SimpleParam.__new__(SimpleParam)\n else:\n return IndexedParam.__new__(IndexedParam)\n\n def __init__(self, *args, **kwd):\n self._rule = kwd.pop('rule', _NotValid )\n self._rule = kwd.pop('initialize', self._rule )\n self._validate = kwd.pop('validate', None )\n self.domain = kwd.pop('domain', Any )\n self.domain = kwd.pop('within', self.domain )\n self._mutable = kwd.pop('mutable', Param.DefaultMutable )\n self._default_val = kwd.pop('default', _NotValid )\n self._dense_initialize = kwd.pop('initialize_as_dense', False)\n #\n if 'repn' in kwd:\n logger.error(\n \"The 'repn' keyword is not a validate keyword argument for Param\")\n #\n if self.domain is None:\n self.domain = Any\n #\n kwd.setdefault('ctype', Param)\n IndexedComponent.__init__(self, *args, **kwd)\n\n def __len__(self):\n \"\"\"\n Return the number of component data objects stored by this\n component. If a default value is specified, then the\n length equals the number of items in the component index.\n \"\"\"\n if self._default_val is _NotValid:\n return len(self._data)\n return len(self._index)\n\n def __contains__(self, idx):\n \"\"\"\n Return true if the index is in the dictionary. If the default value\n is specified, then all members of the component index are valid.\n \"\"\"\n if self._default_val is _NotValid:\n return idx in self._data\n return idx in self._index\n\n def __iter__(self):\n \"\"\"\n Iterate over the keys in the dictionary. 
If the default value is\n specified, then iterate over all keys in the component index.\n \"\"\"\n if self._default_val is _NotValid:\n return self._data.__iter__()\n return self._index.__iter__()\n\n def is_expression_type(self):\n \"\"\"Returns False because this is not an expression\"\"\"\n return False\n\n #\n # These are \"sparse equivalent\" access / iteration methods that\n # only loop over the defined data.\n #\n\n def sparse_keys(self):\n \"\"\"Return a list of keys in the defined parameters\"\"\"\n return list(iterkeys(self._data))\n\n def sparse_values(self):\n \"\"\"Return a list of the defined param data objects\"\"\"\n return list(itervalues(self._data))\n\n def sparse_items(self):\n \"\"\"Return a list (index,data) tuples for defined parameters\"\"\"\n return list(iteritems(self._data))\n\n def sparse_iterkeys(self):\n \"\"\"Return an iterator for the keys in the defined parameters\"\"\"\n return iterkeys(self._data)\n\n def sparse_itervalues(self):\n \"\"\"Return an iterator for the defined param data objects\"\"\"\n return itervalues(self._data)\n\n def sparse_iteritems(self):\n \"\"\"Return an iterator of (index,data) tuples for defined parameters\"\"\"\n return iteritems(self._data)\n\n def extract_values(self):\n \"\"\"\n A utility to extract all index-value pairs defined for this\n parameter, returned as a dictionary.\n\n This method is useful in contexts where key iteration and\n repeated __getitem__ calls are too expensive to extract\n the contents of a parameter.\n \"\"\"\n if self._mutable:\n #\n # The parameter is mutable, parameter data are ParamData types.\n # Thus, we need to create a temporary dictionary that contains the\n # values from the ParamData objects.\n #\n ans = {}\n for key, param_value in self.iteritems():\n ans[key] = param_value()\n return ans\n elif not self.is_indexed():\n #\n # The parameter is a scalar, so we need to create a temporary\n # dictionary using the value for this parameter.\n #\n return { None: self() }\n else:\n #\n # The parameter is not mutable, so iteritems() can be\n # converted into a dictionary containing parameter values.\n #\n return dict( self.iteritems() )\n\n def extract_values_sparse(self):\n \"\"\"\n A utility to extract all index-value pairs defined with non-default\n values, returned as a dictionary.\n\n This method is useful in contexts where key iteration and\n repeated __getitem__ calls are too expensive to extract\n the contents of a parameter.\n \"\"\"\n if self._mutable:\n #\n # The parameter is mutable, parameter data are ParamData types.\n # Thus, we need to create a temporary dictionary that contains the\n # values from the ParamData objects.\n #\n ans = {}\n for key, param_value in self.sparse_iteritems():\n ans[key] = param_value()\n return ans\n elif not self.is_indexed():\n #\n # The parameter is a scalar, so we need to create a temporary\n # dictionary using the value for this parameter.\n #\n return { None: self() }\n else:\n #\n # The parameter is not mutable, so sparse_iteritems() can be\n # converted into a dictionary containing parameter values.\n #\n return dict( self.sparse_iteritems() )\n\n def store_values(self, new_values, check=True):\n \"\"\"\n A utility to update a Param with a dictionary or scalar.\n\n If check=True, then both the index and value\n are checked through the __getitem__ method. 
Using check=False\n should only be used by developers!\n \"\"\"\n if not self._mutable:\n _raise_modifying_immutable_error(self, '*')\n #\n _srcType = type(new_values)\n _isDict = _srcType is dict or ( \\\n hasattr(_srcType, '__getitem__')\n and not isinstance(new_values, NumericValue) )\n #\n if check:\n if _isDict:\n for index, new_value in iteritems(new_values):\n self[index] = new_value\n else:\n for index in self._index:\n self[index] = new_values\n return\n #\n # The argument check is False, so we bypass almost all of the\n # Param logic for ensuring data integrity.\n #\n if self.is_indexed():\n if _isDict:\n # It is possible that the Param is sparse and that the\n # index is not already in the _data dict. As these\n # cases are rare, we will recover from the exception\n # instead of incurring the penalty of checking.\n for index, new_value in iteritems(new_values):\n if index not in self._data:\n self._data[index] = _ParamData(self)\n self._data[index]._value = new_value\n else:\n # For scalars, we will choose an approach based on\n # how \"dense\" the Param is\n if not self._data: # empty\n for index in self._index:\n p = self._data[index] = _ParamData(self)\n p._value = new_values\n elif len(self._data) == len(self._index):\n for index in self._index:\n self._data[index]._value = new_values\n else:\n for index in self._index:\n if index not in self._data:\n self._data[index] = _ParamData(self)\n self._data[index]._value = new_values\n else:\n #\n # Initialize a scalar\n #\n if _isDict:\n if None not in new_values:\n raise RuntimeError(\n \"Cannot store value for scalar Param %s:\\n\\tNo value \"\n \"with index None in the new values dict.\"\n % (self.name,))\n new_values = new_values[None]\n # scalars have to be handled differently\n self[None] = new_values\n\n def set_default(self, val):\n \"\"\"\n Perform error checks and then set the default value for this parameter.\n\n NOTE: this test will not validate the value of function return values.\n \"\"\"\n if self._constructed \\\n and val is not _NotValid \\\n and type(val) in native_types \\\n and val not in self.domain:\n raise ValueError(\n \"Default value (%s) is not valid for Param %s domain %s\" %\n (str(val), self.name, self.domain.name))\n self._default_val = val\n\n def default(self):\n \"\"\"\n Return the value of the parameter default.\n\n Possible values:\n None \n No default value is provided.\n Numeric \n A constant value that is the default value for all undefined \n parameters.\n Function \n f(model, i) returns the value for the default value for \n parameter i\n \"\"\"\n return self._default_val\n\n def _getitem_when_not_present(self, index):\n \"\"\"\n Returns the default component data value\n \"\"\"\n #\n # Local values\n #\n val = self._default_val\n if val is _NotValid:\n # We should allow the creation of mutable params without\n # a default value, as long as *solving* a model without\n # reasonable values produces an informative error.\n if self._mutable:\n # Note: _ParamData defaults to _NotValid\n ans = self._data[index] = _ParamData(self)\n return ans\n if self.is_indexed():\n idx_str = '%s[%s]' % (self.name, index,)\n else:\n idx_str = '%s' % (self.name,)\n raise ValueError(\n \"Error retrieving immutable Param value (%s):\\n\\tThe Param \"\n \"value is undefined and no default value is specified.\"\n % ( idx_str,) )\n\n _default_type = type(val)\n _check_value_domain = True\n if _default_type in native_types:\n #\n # The set_default() method validates the domain of native types, so\n # we can skip the 
check on the value domain.\n #\n _check_value_domain = False\n elif _default_type is types.FunctionType:\n val = apply_indexed_rule(self, val, self.parent_block(), index)\n elif hasattr(val, '__getitem__') and (\n not isinstance(val, NumericValue) or val.is_indexed() ):\n # Things that look like Dictionaries should be allowable. This\n # includes other IndexedComponent objects.\n val = val[index]\n else:\n # this is something simple like a non-indexed component\n pass\n\n #\n # If the user wants to validate values, we need to validate the\n # default value as well. For Mutable Params, this is easy:\n # _setitem_impl will inject the value into _data and\n # then call validate.\n #\n if self._mutable:\n return self._setitem_when_not_present(index, val)\n #\n # For immutable params, we never inject the default into the data\n # dictionary. This will break validation, as the validation rule is\n # allowed to assume the data is already present (actually, it will\n # die on infinite recursion, as Param.__getitem__() will re-call\n # _getitem_when_not_present).\n #\n # So, we will do something very inefficient: if we are\n # validating, we will inject the value into the dictionary,\n # call validate, and remove it.\n #\n if _check_value_domain or self._validate:\n try:\n self._data[index] = val\n self._validate_value(index, val, _check_value_domain)\n finally:\n del self._data[index]\n\n return val\n\n def _setitem_impl(self, index, obj, value):\n \"\"\"The __setitem__ method performs significant validation around the\n input indices, particularly when the index value is new. In\n various contexts, we don't need to incur this overhead\n (e.g. during initialization). The _setitem_impl\n assumes the input value is in the set native_types\n\n \"\"\"\n #\n # We need to ensure that users don't override the value for immutable\n # parameters.\n #\n if self._constructed and not self._mutable:\n _raise_modifying_immutable_error(self, index)\n #\n # Params should contain *values*. Note that if we just call\n # value(), then that forces the value to be a numeric value.\n # Notably, we allow Params with domain==Any to hold strings, tuples,\n # etc. The following lets us use NumericValues to initialize\n # Params, but is optimized to check for \"known\" native types to\n # bypass a potentially expensive isinstance()==False call.\n #\n if value.__class__ not in native_types:\n if isinstance(value, NumericValue):\n value = value()\n #\n # Set the value depending on the type of param value.\n #\n if self._mutable:\n obj.set_value(value, index)\n return obj\n else:\n self._data[index] = value\n # Because we do not have a _ParamData, we cannot rely on the\n # validation that occurs in _ParamData.set_value()\n self._validate_value(index, value)\n return value\n\n def _setitem_when_not_present(self, index, value, _check_domain=True):\n #\n # We need to ensure that users don't override the value for immutable\n # parameters.\n #\n if self._constructed and not self._mutable:\n _raise_modifying_immutable_error(self, index)\n #\n # Params should contain *values*. Note that if we just call\n # value(), then that forces the value to be a numeric value.\n # Notably, we allow Params with domain==Any to hold strings, tuples,\n # etc. 
The following lets us use NumericValues to initialize\n # Params, but is optimized to check for \"known\" native types to\n # bypass a potentially expensive isinstance()==False call.\n #\n if value.__class__ not in native_types:\n if isinstance(value, NumericValue):\n value = value()\n\n #\n # Set the value depending on the type of param value.\n #\n try:\n if index is None and not self.is_indexed():\n self._data[None] = self\n self.set_value(value, index)\n return self\n elif self._mutable:\n obj = self._data[index] = _ParamData(self)\n obj.set_value(value, index)\n return obj\n else:\n self._data[index] = value\n # Because we do not have a _ParamData, we cannot rely on the\n # validation that occurs in _ParamData.set_value()\n self._validate_value(index, value, _check_domain)\n return value\n except:\n del self._data[index]\n\n\n def _validate_value(self, index, value, validate_domain=True):\n \"\"\"\n Validate a given input/value pair.\n \"\"\"\n #\n # Check if the value is valid within the current domain\n #\n if validate_domain and not value in self.domain:\n raise ValueError(\n \"Invalid parameter value: %s[%s] = '%s', value type=%s.\\n\"\n \"\\tValue not in parameter domain %s\" %\n (self.name, index, value, type(value), self.domain.name))\n if self._validate:\n valid = apply_parameterized_indexed_rule(\n self, self._validate, self.parent_block(), value, index )\n if not valid:\n raise ValueError(\n \"Invalid parameter value: %s[%s] = '%s', value type=%s.\\n\"\n \"\\tValue failed parameter validation rule\" %\n ( self.name, index, value, type(value) ) )\n\n def _initialize_from(self, _init):\n \"\"\"\n Initialize data from a rule or data\n \"\"\"\n _init_type = type(_init)\n _isDict = _init_type is dict\n\n if _isDict or _init_type in native_types:\n #\n # We skip the other tests if we have a dictionary or constant\n # value, as these are the most common cases.\n #\n pass\n\n elif _init_type is types.FunctionType:\n #\n # Initializing from a function\n #\n if not self.is_indexed():\n #\n # A scalar value has a single value.\n # We call __setitem__, which does checks on the value.\n #\n self._setitem_when_not_present(None, _init(self.parent_block()))\n return\n else:\n #\n # An indexed parameter, where we call the function for each\n # index.\n #\n self_parent = self.parent_block()\n #\n try:\n #\n # Create an iterator for the indices. We assume that\n # it returns flattened tuples. Otherwise,\n # the validation process is far too expensive.\n #\n _iter = self._index.__iter__()\n idx = next(_iter)\n #\n # If a function returns a dict (or\n # dict-like thing), then we initialize the Param object\n # by reseting _init and _isDict\n #\n # Note that this logic allows the user to call a\n # function without an index\n #\n val = apply_indexed_rule(self, _init, self_parent, idx)\n\n #\n # The following is a simplification of the main\n # _initialize_from logic. The idea is that if the\n # function returns a scalar-like thing, use it to\n # initialize this index and re-call the function for\n # the next value. 
However, if the function returns\n # something that is dict-like, then use the dict to\n # initialize everything and do not re-call the\n # initialize function.\n #\n # Note: while scalar components are technically\n # \"dict-like\", we will treat them as scalars and\n # re-call the initialize function.\n #\n _dict_like = False\n if type(val) is dict:\n _dict_like = True\n elif isinstance(val, IndexedComponent):\n _dict_like = val.is_indexed()\n elif hasattr(val, '__getitem__') \\\n and not isinstance(val, NumericValue):\n try:\n for x in _init:\n _init.__getitem__(x)\n _dict_like = True\n except:\n pass\n\n if _dict_like:\n _init = val\n _isDict = True\n else:\n #\n # At this point, we know the value is specific\n # to this index (i.e., not likely to be a\n # dict-like thing), and that the index is valid;\n # so, it is safe to use _setitem_impl\n # (which will perform all the domain /\n # validation checking)\n #\n self._setitem_when_not_present(idx, val)\n #\n # Now iterate over the rest of the index set.\n #\n for idx in _iter:\n self._setitem_when_not_present(\n idx, apply_indexed_rule(\n self, _init, self_parent, idx))\n return\n except StopIteration:\n #\n # The index set was empty... The parameter is indexed by\n # an empty set, or an empty set tuple. Rare, but it has\n # happened.\n #\n return\n\n elif isinstance(_init, NumericValue):\n #\n # Reduce NumericValues to scalars. This allows us to treat\n # scalar components as numbers and not\n # as indexed components with a index set of [None]\n #\n _init = _init()\n\n elif isinstance(_init, IndexedComponent):\n #\n # Ideally, we want to reduce IndexedComponents to\n # a dict, but without \"densifying\" it. However, since\n # there is no way to (easily) get the default value, we\n # will take the \"less surprising\" route of letting the\n # source become dense, so that we get the expected copy.\n #\n # TBD: Are there use-cases where we want to maintain sparsity?\n #\n _init_keys_len = sum(1 for _ in _init.keys())\n sparse_src = len(_init) != _init_keys_len\n tmp = dict( _init.iteritems() )\n if sparse_src and len(_init) == _init_keys_len:\n logger.warning(\"\"\"\nInitializing Param %s using a sparse mutable indexed component (%s).\nThis has resulted in the conversion of the source to dense form.\n\"\"\" % (self.name, _init.name))\n _init = tmp\n _isDict = True\n\n #\n # If the _init is not a native dictionary, but it\n # behaves like one (that is, it could be converted to a\n # dict with \"dict((key,_init[key]) for key in _init)\"),\n # then we will treat it as such\n #\n # TODO: Establish a use-case for this. This iteration is\n # expensive.\n #\n if not _isDict and hasattr(_init, '__getitem__'):\n try:\n _isDict = True\n for x in _init:\n _init.__getitem__(x)\n except:\n _isDict = False\n #\n # Now, we either have a scalar or a dictionary\n #\n if _isDict:\n #\n # Because this is a user-specified dictionary, we\n # must use the normal (expensive) __setitem__ route\n # so that the individual indices are validated.\n #\n for key in _init:\n self[key] = _init[key]\n else:\n try:\n #\n # A constant is being supplied as a default to\n # a parameter. 
This happens for indexed parameters,\n # particularly when dealing with mutable parameters.\n #\n # We look at the first iteration index separately to\n # to validate the value against the domain once.\n #\n _iter = self._index.__iter__()\n idx = next(_iter)\n self._setitem_when_not_present(idx, _init)\n #\n # Note: the following is safe for both indexed and\n # non-indexed parameters: for non-indexed, the first\n # idx (above) will be None, and the for-loop below\n # will NOT be called.\n #\n if self._mutable:\n _init = self[idx]._value\n for idx in _iter:\n self._setitem_when_not_present(idx, _init)\n else:\n _init = self[idx]\n for idx in _iter:\n self._setitem_when_not_present(\n idx, _init, _check_domain=False )\n except StopIteration:\n #\n # The index set was empty...\n # The parameter is indexed by an empty set, or an empty set tuple.\n # Rare, but it has happened.\n #\n pass\n\n def construct(self, data=None):\n \"\"\"\n Initialize this component.\n\n A parameter is constructed using the initial data or\n the data loaded from an external source. We first\n set all the values based on self._rule, and then\n allow the data dictionary to overwrite anything.\n\n Note that we allow an undefined Param value to be\n constructed. We throw an exception if a user tries\n to use an uninitialized Param.\n \"\"\"\n if __debug__ and logger.isEnabledFor(logging.DEBUG): #pragma:nocover\n logger.debug(\"Constructing Param, name=%s, from data=%s\"\n % ( self.name, str(data) ))\n #\n if self._constructed:\n return\n timer = ConstructionTimer(self)\n #\n # If the default value is a simple type, we check it versus\n # the domain.\n #\n val = self._default_val\n if val is not _NotValid \\\n and type(val) in native_types \\\n and val not in self.domain:\n raise ValueError(\n \"Default value (%s) is not valid for Param %s domain %s\" %\n (str(val), self.name, self.domain.name))\n #\n # Flag that we are in the \"during construction\" phase\n #\n self._constructed = None\n #\n # Step #1: initialize data from rule value\n #\n if self._rule is not _NotValid:\n self._initialize_from(self._rule)\n #\n # Step #2: allow any user-specified (external) data to override\n # the initialization\n #\n if data is not None:\n try:\n for key, val in iteritems(data):\n self._setitem_when_not_present(\n self._validate_index(key), val)\n except Exception:\n msg = sys.exc_info()[1]\n if type(data) is not dict:\n raise ValueError(\n \"Attempting to initialize parameter=%s with data=%s.\\n\"\n \"\\tData type is not a dictionary, and a dictionary is \"\n \"expected.\" % (self.name, str(data)) )\n else:\n raise RuntimeError(\n \"Failed to set value for param=%s, index=%s, value=%s.\"\n \"\\n\\tsource error message=%s\"\n % (self.name, str(key), str(val), str(msg)) )\n #\n # Flag that things are fully constructed now (and changing an\n # inmutable Param is now an exception).\n #\n self._constructed = True\n\n # populate all other indices with default data\n # (avoids calling _set_contains on self._index at runtime)\n if self._dense_initialize:\n self.to_dense_data()\n timer.report()\n\n def reconstruct(self, data=None):\n \"\"\"\n Reconstruct this parameter object. This is particularly useful\n for cases where an initialize rule is provided. An initialize\n rule can return an expression that is a function of other\n parameters, so reconstruction can account for changes in dependent\n parameters.\n\n Only mutable parameters can be reconstructed. 
Otherwise, the\n changes would not be propagated into expressions in objectives\n or constraints.\n \"\"\"\n if not self._mutable:\n raise RuntimeError(\n \"Cannot invoke reconstruct method of immutable Param %s\"\n % (self.name,))\n IndexedComponent.reconstruct(self, data=data)\n\n def _pprint(self):\n \"\"\"\n Return data that will be printed for this component.\n \"\"\"\n if self._default_val is _NotValid:\n default = \"None\" # for backwards compatibility in reporting\n elif type(self._default_val) is types.FunctionType:\n default = \"(function)\"\n else:\n default = str(self._default_val)\n if self._mutable or not self.is_indexed():\n dataGen = lambda k, v: [ v._value, ]\n else:\n dataGen = lambda k, v: [ v, ]\n return ( [(\"Size\", len(self)),\n (\"Index\", self._index if self.is_indexed() else None),\n (\"Domain\", self.domain.name),\n (\"Default\", default),\n (\"Mutable\", self._mutable),\n ],\n self.sparse_iteritems(),\n (\"Value\",),\n dataGen,\n )\n\n\nclass SimpleParam(_ParamData, Param):\n\n def __init__(self, *args, **kwds):\n Param.__init__(self, *args, **kwds)\n _ParamData.__init__(self, component=self)\n\n #\n # Since this class derives from Component and Component.__getstate__\n # just packs up the entire __dict__ into the state dict, there s\n # nothng special that we need to do here. We will just defer to the\n # super() get/set state. Since all of our get/set state methods\n # rely on super() to traverse the MRO, this will automatically pick\n # up both the Component and Data base classes.\n #\n\n def __call__(self, exception=True):\n \"\"\"\n Return the value of this parameter.\n \"\"\"\n if self._constructed:\n if not self._data:\n if self._mutable:\n # This will trigger populating the _data dict and setting\n # the _default, if applicable\n self[None]\n else:\n # Immutable Param defaults never get added to the\n # _data dict\n return self[None]\n return super(SimpleParam, self).__call__(exception=exception)\n if exception:\n raise ValueError(\n \"Evaluating the numeric value of parameter '%s' before\\n\\t\"\n \"the Param has been constructed (there is currently no \"\n \"value to return).\" % (self.name,) )\n\n def set_value(self, value, index=NoArgumentGiven):\n if index is NoArgumentGiven:\n index = None\n if self._constructed and not self._mutable:\n _raise_modifying_immutable_error(self, index)\n if not self._data:\n self._data[index] = self\n super(SimpleParam, self).set_value(value, index)\n\n def is_constant(self):\n \"\"\"Determine if this SimpleParam is constant (and can be eliminated)\n\n Returns False if either unconstructed or mutable, as it must be kept\n in expressions (as it either doesn't have a value yet or the value\n can change later.\n \"\"\"\n return self._constructed and not self._mutable\n\n\nclass IndexedParam(Param):\n\n def __call__(self, exception=True):\n \"\"\"Compute the value of the parameter\"\"\"\n if exception:\n raise TypeError('Cannot compute the value of an indexed Param (%s)'\n % (self.name,) )\n\n", "path": "pyomo/core/base/param.py" } ]
[ { "content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n__all__ = ['Param']\n\nimport sys\nimport types\nimport logging\nfrom weakref import ref as weakref_ref\n\nfrom pyomo.common.modeling import NoArgumentGiven\nfrom pyomo.common.timing import ConstructionTimer\nfrom pyomo.core.base.plugin import ModelComponentFactory\nfrom pyomo.core.base.component import ComponentData\nfrom pyomo.core.base.indexed_component import IndexedComponent, \\\n UnindexedComponent_set\nfrom pyomo.core.base.misc import apply_indexed_rule, apply_parameterized_indexed_rule\nfrom pyomo.core.base.numvalue import NumericValue, native_types, value\nfrom pyomo.core.base.set_types import Any\n\nfrom six import iteritems, iterkeys, next, itervalues\n\nlogger = logging.getLogger('pyomo.core')\n\ndef _raise_modifying_immutable_error(obj, index):\n if obj.is_indexed():\n name = \"%s[%s]\" % (obj.name, index)\n else:\n name = obj.name\n raise TypeError(\n \"Attempting to set the value of the immutable parameter \"\n \"%s after the parameter has been constructed. If you intend \"\n \"to change the value of this parameter dynamically, please \"\n \"declare the parameter as mutable [i.e., Param(mutable=True)]\"\n % (name,))\n\n\nclass _NotValid(object):\n \"\"\"A dummy type that is pickle-safe that we can use as the default\n value for Params to indicate that no valid value is present.\"\"\"\n pass\n\n\nclass _ParamData(ComponentData, NumericValue):\n \"\"\"\n This class defines the data for a mutable parameter.\n\n Constructor Arguments:\n owner The Param object that owns this data.\n value The value of this parameter.\n\n Public Class Attributes:\n value The numeric value of this variable.\n \"\"\"\n\n __slots__ = ('_value',)\n\n def __init__(self, component):\n #\n # The following is equivalent to calling\n # the base ComponentData constructor.\n #\n self._component = weakref_ref(component)\n #\n # The following is equivalent to calling the\n # base NumericValue constructor.\n #\n self._value = _NotValid\n\n def __getstate__(self):\n \"\"\"\n This method must be defined because this class uses slots.\n \"\"\"\n state = super(_ParamData, self).__getstate__()\n for i in _ParamData.__slots__:\n state[i] = getattr(self, i)\n return state\n\n # Note: because NONE of the slots on this class need to be edited,\n # we don't need to implement a specialized __setstate__ method.\n\n def clear(self):\n \"\"\"Clear the data in this component\"\"\"\n self._value = _NotValid\n\n # FIXME: ComponentData need to have pointers to their index to make\n # operations like validation efficient. 
As it stands now, if\n # set_value is called without specifying an index, this call\n # involves a linear scan of the _data dict.\n def set_value(self, value, idx=NoArgumentGiven):\n self._value = value\n if idx is NoArgumentGiven:\n idx = self.index()\n self.parent_component()._validate_value(idx, value)\n\n def __call__(self, exception=True):\n \"\"\"\n Return the value of this object.\n \"\"\"\n if self._value is _NotValid:\n if exception:\n raise ValueError(\n \"Error evaluating Param value (%s):\\n\\tThe Param value is \"\n \"currently set to an invalid value. This is\\n\\ttypically \"\n \"from a scalar Param or mutable Indexed Param without\\n\"\n \"\\tan initial or default value.\"\n % ( self.name, ))\n else:\n return None\n return self._value\n\n @property\n def value(self):\n \"\"\"Return the value for this variable.\"\"\"\n return self()\n @value.setter\n def value(self, val):\n \"\"\"Set the value for this variable.\"\"\"\n self.set_value(val)\n\n\n def is_fixed(self):\n \"\"\"\n Returns True because this value is fixed.\n \"\"\"\n return True\n\n def is_constant(self):\n \"\"\"\n Returns False because this is not a constant in an expression.\n \"\"\"\n return False\n\n def is_parameter_type(self):\n \"\"\"\n Returns True because this is a parameter object.\n \"\"\"\n return True\n\n def is_variable_type(self):\n \"\"\"\n Returns False because this is not a variable object.\n \"\"\"\n return False\n\n def is_expression_type(self):\n \"\"\"Returns False because this is not an expression\"\"\"\n return False\n\n def is_potentially_variable(self):\n \"\"\"\n Returns False because this object can never reference variables.\n \"\"\"\n return False\n\n def _compute_polynomial_degree(self, result):\n \"\"\"\n Returns 0 because this object can never reference variables.\n \"\"\"\n return 0\n\n def __nonzero__(self):\n \"\"\"Return True if the value is defined and non-zero.\"\"\"\n return bool(self())\n\n __bool__ = __nonzero__\n\n\[email protected](\"Parameter data that is used to define a model instance.\")\nclass Param(IndexedComponent):\n \"\"\"\n A parameter value, which may be defined over an index.\n\n Constructor Arguments:\n name \n The name of this parameter\n index \n The index set that defines the distinct parameters. By default, \n this is None, indicating that there is a single parameter.\n domain \n A set that defines the type of values that each parameter must be.\n within \n A set that defines the type of values that each parameter must be.\n validate \n A rule for validating this parameter w.r.t. 
data that exists in \n the model\n default \n A scalar, rule, or dictionary that defines default values for \n this parameter\n initialize \n A dictionary or rule for setting up this parameter with existing \n model data\n \"\"\"\n\n DefaultMutable = False\n\n def __new__(cls, *args, **kwds):\n if cls != Param:\n return super(Param, cls).__new__(cls)\n if not args or (args[0] is UnindexedComponent_set and len(args)==1):\n return SimpleParam.__new__(SimpleParam)\n else:\n return IndexedParam.__new__(IndexedParam)\n\n def __init__(self, *args, **kwd):\n self._rule = kwd.pop('rule', _NotValid )\n self._rule = kwd.pop('initialize', self._rule )\n self._validate = kwd.pop('validate', None )\n self.domain = kwd.pop('domain', Any )\n self.domain = kwd.pop('within', self.domain )\n self._mutable = kwd.pop('mutable', Param.DefaultMutable )\n self._default_val = kwd.pop('default', _NotValid )\n self._dense_initialize = kwd.pop('initialize_as_dense', False)\n #\n if 'repn' in kwd:\n logger.error(\n \"The 'repn' keyword is not a validate keyword argument for Param\")\n #\n if self.domain is None:\n self.domain = Any\n #\n kwd.setdefault('ctype', Param)\n IndexedComponent.__init__(self, *args, **kwd)\n\n def __len__(self):\n \"\"\"\n Return the number of component data objects stored by this\n component. If a default value is specified, then the\n length equals the number of items in the component index.\n \"\"\"\n if self._default_val is _NotValid:\n return len(self._data)\n return len(self._index)\n\n def __contains__(self, idx):\n \"\"\"\n Return true if the index is in the dictionary. If the default value\n is specified, then all members of the component index are valid.\n \"\"\"\n if self._default_val is _NotValid:\n return idx in self._data\n return idx in self._index\n\n def __iter__(self):\n \"\"\"\n Iterate over the keys in the dictionary. 
If the default value is\n specified, then iterate over all keys in the component index.\n \"\"\"\n if self._default_val is _NotValid:\n return self._data.__iter__()\n return self._index.__iter__()\n\n def is_expression_type(self):\n \"\"\"Returns False because this is not an expression\"\"\"\n return False\n\n #\n # These are \"sparse equivalent\" access / iteration methods that\n # only loop over the defined data.\n #\n\n def sparse_keys(self):\n \"\"\"Return a list of keys in the defined parameters\"\"\"\n return list(iterkeys(self._data))\n\n def sparse_values(self):\n \"\"\"Return a list of the defined param data objects\"\"\"\n return list(itervalues(self._data))\n\n def sparse_items(self):\n \"\"\"Return a list (index,data) tuples for defined parameters\"\"\"\n return list(iteritems(self._data))\n\n def sparse_iterkeys(self):\n \"\"\"Return an iterator for the keys in the defined parameters\"\"\"\n return iterkeys(self._data)\n\n def sparse_itervalues(self):\n \"\"\"Return an iterator for the defined param data objects\"\"\"\n return itervalues(self._data)\n\n def sparse_iteritems(self):\n \"\"\"Return an iterator of (index,data) tuples for defined parameters\"\"\"\n return iteritems(self._data)\n\n def extract_values(self):\n \"\"\"\n A utility to extract all index-value pairs defined for this\n parameter, returned as a dictionary.\n\n This method is useful in contexts where key iteration and\n repeated __getitem__ calls are too expensive to extract\n the contents of a parameter.\n \"\"\"\n if self._mutable:\n #\n # The parameter is mutable, parameter data are ParamData types.\n # Thus, we need to create a temporary dictionary that contains the\n # values from the ParamData objects.\n #\n ans = {}\n for key, param_value in self.iteritems():\n ans[key] = param_value()\n return ans\n elif not self.is_indexed():\n #\n # The parameter is a scalar, so we need to create a temporary\n # dictionary using the value for this parameter.\n #\n return { None: self() }\n else:\n #\n # The parameter is not mutable, so iteritems() can be\n # converted into a dictionary containing parameter values.\n #\n return dict( self.iteritems() )\n\n def extract_values_sparse(self):\n \"\"\"\n A utility to extract all index-value pairs defined with non-default\n values, returned as a dictionary.\n\n This method is useful in contexts where key iteration and\n repeated __getitem__ calls are too expensive to extract\n the contents of a parameter.\n \"\"\"\n if self._mutable:\n #\n # The parameter is mutable, parameter data are ParamData types.\n # Thus, we need to create a temporary dictionary that contains the\n # values from the ParamData objects.\n #\n ans = {}\n for key, param_value in self.sparse_iteritems():\n ans[key] = param_value()\n return ans\n elif not self.is_indexed():\n #\n # The parameter is a scalar, so we need to create a temporary\n # dictionary using the value for this parameter.\n #\n return { None: self() }\n else:\n #\n # The parameter is not mutable, so sparse_iteritems() can be\n # converted into a dictionary containing parameter values.\n #\n return dict( self.sparse_iteritems() )\n\n def store_values(self, new_values, check=True):\n \"\"\"\n A utility to update a Param with a dictionary or scalar.\n\n If check=True, then both the index and value\n are checked through the __getitem__ method. 
Using check=False\n should only be used by developers!\n \"\"\"\n if not self._mutable:\n _raise_modifying_immutable_error(self, '*')\n #\n _srcType = type(new_values)\n _isDict = _srcType is dict or ( \\\n hasattr(_srcType, '__getitem__')\n and not isinstance(new_values, NumericValue) )\n #\n if check:\n if _isDict:\n for index, new_value in iteritems(new_values):\n self[index] = new_value\n else:\n for index in self._index:\n self[index] = new_values\n return\n #\n # The argument check is False, so we bypass almost all of the\n # Param logic for ensuring data integrity.\n #\n if self.is_indexed():\n if _isDict:\n # It is possible that the Param is sparse and that the\n # index is not already in the _data dict. As these\n # cases are rare, we will recover from the exception\n # instead of incurring the penalty of checking.\n for index, new_value in iteritems(new_values):\n if index not in self._data:\n self._data[index] = _ParamData(self)\n self._data[index]._value = new_value\n else:\n # For scalars, we will choose an approach based on\n # how \"dense\" the Param is\n if not self._data: # empty\n for index in self._index:\n p = self._data[index] = _ParamData(self)\n p._value = new_values\n elif len(self._data) == len(self._index):\n for index in self._index:\n self._data[index]._value = new_values\n else:\n for index in self._index:\n if index not in self._data:\n self._data[index] = _ParamData(self)\n self._data[index]._value = new_values\n else:\n #\n # Initialize a scalar\n #\n if _isDict:\n if None not in new_values:\n raise RuntimeError(\n \"Cannot store value for scalar Param %s:\\n\\tNo value \"\n \"with index None in the new values dict.\"\n % (self.name,))\n new_values = new_values[None]\n # scalars have to be handled differently\n self[None] = new_values\n\n def set_default(self, val):\n \"\"\"\n Perform error checks and then set the default value for this parameter.\n\n NOTE: this test will not validate the value of function return values.\n \"\"\"\n if self._constructed \\\n and val is not _NotValid \\\n and type(val) in native_types \\\n and val not in self.domain:\n raise ValueError(\n \"Default value (%s) is not valid for Param %s domain %s\" %\n (str(val), self.name, self.domain.name))\n self._default_val = val\n\n def default(self):\n \"\"\"\n Return the value of the parameter default.\n\n Possible values:\n None \n No default value is provided.\n Numeric \n A constant value that is the default value for all undefined \n parameters.\n Function \n f(model, i) returns the value for the default value for \n parameter i\n \"\"\"\n return self._default_val\n\n def _getitem_when_not_present(self, index):\n \"\"\"\n Returns the default component data value\n \"\"\"\n #\n # Local values\n #\n val = self._default_val\n if val is _NotValid:\n # We should allow the creation of mutable params without\n # a default value, as long as *solving* a model without\n # reasonable values produces an informative error.\n if self._mutable:\n # Note: _ParamData defaults to _NotValid\n ans = self._data[index] = _ParamData(self)\n return ans\n if self.is_indexed():\n idx_str = '%s[%s]' % (self.name, index,)\n else:\n idx_str = '%s' % (self.name,)\n raise ValueError(\n \"Error retrieving immutable Param value (%s):\\n\\tThe Param \"\n \"value is undefined and no default value is specified.\"\n % ( idx_str,) )\n\n _default_type = type(val)\n _check_value_domain = True\n if _default_type in native_types:\n #\n # The set_default() method validates the domain of native types, so\n # we can skip the 
check on the value domain.\n #\n _check_value_domain = False\n elif _default_type is types.FunctionType:\n val = apply_indexed_rule(self, val, self.parent_block(), index)\n elif hasattr(val, '__getitem__') and (\n not isinstance(val, NumericValue) or val.is_indexed() ):\n # Things that look like Dictionaries should be allowable. This\n # includes other IndexedComponent objects.\n val = val[index]\n else:\n # this is something simple like a non-indexed component\n pass\n\n #\n # If the user wants to validate values, we need to validate the\n # default value as well. For Mutable Params, this is easy:\n # _setitem_impl will inject the value into _data and\n # then call validate.\n #\n if self._mutable:\n return self._setitem_when_not_present(index, val)\n #\n # For immutable params, we never inject the default into the data\n # dictionary. This will break validation, as the validation rule is\n # allowed to assume the data is already present (actually, it will\n # die on infinite recursion, as Param.__getitem__() will re-call\n # _getitem_when_not_present).\n #\n # So, we will do something very inefficient: if we are\n # validating, we will inject the value into the dictionary,\n # call validate, and remove it.\n #\n if _check_value_domain or self._validate:\n try:\n self._data[index] = val\n self._validate_value(index, val, _check_value_domain)\n finally:\n del self._data[index]\n\n return val\n\n def _setitem_impl(self, index, obj, value):\n \"\"\"The __setitem__ method performs significant validation around the\n input indices, particularly when the index value is new. In\n various contexts, we don't need to incur this overhead\n (e.g. during initialization). The _setitem_impl\n assumes the input value is in the set native_types\n\n \"\"\"\n #\n # We need to ensure that users don't override the value for immutable\n # parameters.\n #\n if self._constructed and not self._mutable:\n _raise_modifying_immutable_error(self, index)\n #\n # Params should contain *values*. Note that if we just call\n # value(), then that forces the value to be a numeric value.\n # Notably, we allow Params with domain==Any to hold strings, tuples,\n # etc. The following lets us use NumericValues to initialize\n # Params, but is optimized to check for \"known\" native types to\n # bypass a potentially expensive isinstance()==False call.\n #\n if value.__class__ not in native_types:\n if isinstance(value, NumericValue):\n value = value()\n #\n # Set the value depending on the type of param value.\n #\n if self._mutable:\n obj.set_value(value, index)\n return obj\n else:\n self._data[index] = value\n # Because we do not have a _ParamData, we cannot rely on the\n # validation that occurs in _ParamData.set_value()\n self._validate_value(index, value)\n return value\n\n def _setitem_when_not_present(self, index, value, _check_domain=True):\n #\n # We need to ensure that users don't override the value for immutable\n # parameters.\n #\n if self._constructed and not self._mutable:\n _raise_modifying_immutable_error(self, index)\n #\n # Params should contain *values*. Note that if we just call\n # value(), then that forces the value to be a numeric value.\n # Notably, we allow Params with domain==Any to hold strings, tuples,\n # etc. 
The following lets us use NumericValues to initialize\n # Params, but is optimized to check for \"known\" native types to\n # bypass a potentially expensive isinstance()==False call.\n #\n if value.__class__ not in native_types:\n if isinstance(value, NumericValue):\n value = value()\n\n #\n # Set the value depending on the type of param value.\n #\n try:\n if index is None and not self.is_indexed():\n self._data[None] = self\n self.set_value(value, index)\n return self\n elif self._mutable:\n obj = self._data[index] = _ParamData(self)\n obj.set_value(value, index)\n return obj\n else:\n self._data[index] = value\n # Because we do not have a _ParamData, we cannot rely on the\n # validation that occurs in _ParamData.set_value()\n self._validate_value(index, value, _check_domain)\n return value\n except:\n del self._data[index]\n raise\n\n\n def _validate_value(self, index, value, validate_domain=True):\n \"\"\"\n Validate a given input/value pair.\n \"\"\"\n #\n # Check if the value is valid within the current domain\n #\n if validate_domain and not value in self.domain:\n raise ValueError(\n \"Invalid parameter value: %s[%s] = '%s', value type=%s.\\n\"\n \"\\tValue not in parameter domain %s\" %\n (self.name, index, value, type(value), self.domain.name))\n if self._validate:\n valid = apply_parameterized_indexed_rule(\n self, self._validate, self.parent_block(), value, index )\n if not valid:\n raise ValueError(\n \"Invalid parameter value: %s[%s] = '%s', value type=%s.\\n\"\n \"\\tValue failed parameter validation rule\" %\n ( self.name, index, value, type(value) ) )\n\n def _initialize_from(self, _init):\n \"\"\"\n Initialize data from a rule or data\n \"\"\"\n _init_type = type(_init)\n _isDict = _init_type is dict\n\n if _isDict or _init_type in native_types:\n #\n # We skip the other tests if we have a dictionary or constant\n # value, as these are the most common cases.\n #\n pass\n\n elif _init_type is types.FunctionType:\n #\n # Initializing from a function\n #\n if not self.is_indexed():\n #\n # A scalar value has a single value.\n # We call __setitem__, which does checks on the value.\n #\n self._setitem_when_not_present(None, _init(self.parent_block()))\n return\n else:\n #\n # An indexed parameter, where we call the function for each\n # index.\n #\n self_parent = self.parent_block()\n #\n try:\n #\n # Create an iterator for the indices. We assume that\n # it returns flattened tuples. Otherwise,\n # the validation process is far too expensive.\n #\n _iter = self._index.__iter__()\n idx = next(_iter)\n #\n # If a function returns a dict (or\n # dict-like thing), then we initialize the Param object\n # by reseting _init and _isDict\n #\n # Note that this logic allows the user to call a\n # function without an index\n #\n val = apply_indexed_rule(self, _init, self_parent, idx)\n\n #\n # The following is a simplification of the main\n # _initialize_from logic. The idea is that if the\n # function returns a scalar-like thing, use it to\n # initialize this index and re-call the function for\n # the next value. 
However, if the function returns\n # something that is dict-like, then use the dict to\n # initialize everything and do not re-call the\n # initialize function.\n #\n # Note: while scalar components are technically\n # \"dict-like\", we will treat them as scalars and\n # re-call the initialize function.\n #\n _dict_like = False\n if type(val) is dict:\n _dict_like = True\n elif isinstance(val, IndexedComponent):\n _dict_like = val.is_indexed()\n elif hasattr(val, '__getitem__') \\\n and not isinstance(val, NumericValue):\n try:\n for x in _init:\n _init.__getitem__(x)\n _dict_like = True\n except:\n pass\n\n if _dict_like:\n _init = val\n _isDict = True\n else:\n #\n # At this point, we know the value is specific\n # to this index (i.e., not likely to be a\n # dict-like thing), and that the index is valid;\n # so, it is safe to use _setitem_impl\n # (which will perform all the domain /\n # validation checking)\n #\n self._setitem_when_not_present(idx, val)\n #\n # Now iterate over the rest of the index set.\n #\n for idx in _iter:\n self._setitem_when_not_present(\n idx, apply_indexed_rule(\n self, _init, self_parent, idx))\n return\n except StopIteration:\n #\n # The index set was empty... The parameter is indexed by\n # an empty set, or an empty set tuple. Rare, but it has\n # happened.\n #\n return\n\n elif isinstance(_init, NumericValue):\n #\n # Reduce NumericValues to scalars. This allows us to treat\n # scalar components as numbers and not\n # as indexed components with a index set of [None]\n #\n _init = _init()\n\n elif isinstance(_init, IndexedComponent):\n #\n # Ideally, we want to reduce IndexedComponents to\n # a dict, but without \"densifying\" it. However, since\n # there is no way to (easily) get the default value, we\n # will take the \"less surprising\" route of letting the\n # source become dense, so that we get the expected copy.\n #\n # TBD: Are there use-cases where we want to maintain sparsity?\n #\n _init_keys_len = sum(1 for _ in _init.keys())\n sparse_src = len(_init) != _init_keys_len\n tmp = dict( _init.iteritems() )\n if sparse_src and len(_init) == _init_keys_len:\n logger.warning(\"\"\"\nInitializing Param %s using a sparse mutable indexed component (%s).\nThis has resulted in the conversion of the source to dense form.\n\"\"\" % (self.name, _init.name))\n _init = tmp\n _isDict = True\n\n #\n # If the _init is not a native dictionary, but it\n # behaves like one (that is, it could be converted to a\n # dict with \"dict((key,_init[key]) for key in _init)\"),\n # then we will treat it as such\n #\n # TODO: Establish a use-case for this. This iteration is\n # expensive.\n #\n if not _isDict and hasattr(_init, '__getitem__'):\n try:\n _isDict = True\n for x in _init:\n _init.__getitem__(x)\n except:\n _isDict = False\n #\n # Now, we either have a scalar or a dictionary\n #\n if _isDict:\n #\n # Because this is a user-specified dictionary, we\n # must use the normal (expensive) __setitem__ route\n # so that the individual indices are validated.\n #\n for key in _init:\n self[key] = _init[key]\n else:\n try:\n #\n # A constant is being supplied as a default to\n # a parameter. 
This happens for indexed parameters,\n # particularly when dealing with mutable parameters.\n #\n # We look at the first iteration index separately to\n # to validate the value against the domain once.\n #\n _iter = self._index.__iter__()\n idx = next(_iter)\n self._setitem_when_not_present(idx, _init)\n #\n # Note: the following is safe for both indexed and\n # non-indexed parameters: for non-indexed, the first\n # idx (above) will be None, and the for-loop below\n # will NOT be called.\n #\n if self._mutable:\n _init = self[idx]._value\n for idx in _iter:\n self._setitem_when_not_present(idx, _init)\n else:\n _init = self[idx]\n for idx in _iter:\n self._setitem_when_not_present(\n idx, _init, _check_domain=False )\n except StopIteration:\n #\n # The index set was empty...\n # The parameter is indexed by an empty set, or an empty set tuple.\n # Rare, but it has happened.\n #\n pass\n\n def construct(self, data=None):\n \"\"\"\n Initialize this component.\n\n A parameter is constructed using the initial data or\n the data loaded from an external source. We first\n set all the values based on self._rule, and then\n allow the data dictionary to overwrite anything.\n\n Note that we allow an undefined Param value to be\n constructed. We throw an exception if a user tries\n to use an uninitialized Param.\n \"\"\"\n if __debug__ and logger.isEnabledFor(logging.DEBUG): #pragma:nocover\n logger.debug(\"Constructing Param, name=%s, from data=%s\"\n % ( self.name, str(data) ))\n #\n if self._constructed:\n return\n timer = ConstructionTimer(self)\n #\n # If the default value is a simple type, we check it versus\n # the domain.\n #\n val = self._default_val\n if val is not _NotValid \\\n and type(val) in native_types \\\n and val not in self.domain:\n raise ValueError(\n \"Default value (%s) is not valid for Param %s domain %s\" %\n (str(val), self.name, self.domain.name))\n #\n # Flag that we are in the \"during construction\" phase\n #\n self._constructed = None\n #\n # Step #1: initialize data from rule value\n #\n if self._rule is not _NotValid:\n self._initialize_from(self._rule)\n #\n # Step #2: allow any user-specified (external) data to override\n # the initialization\n #\n if data is not None:\n try:\n for key, val in iteritems(data):\n self._setitem_when_not_present(\n self._validate_index(key), val)\n except Exception:\n msg = sys.exc_info()[1]\n if type(data) is not dict:\n raise ValueError(\n \"Attempting to initialize parameter=%s with data=%s.\\n\"\n \"\\tData type is not a dictionary, and a dictionary is \"\n \"expected.\" % (self.name, str(data)) )\n else:\n raise RuntimeError(\n \"Failed to set value for param=%s, index=%s, value=%s.\"\n \"\\n\\tsource error message=%s\"\n % (self.name, str(key), str(val), str(msg)) )\n #\n # Flag that things are fully constructed now (and changing an\n # inmutable Param is now an exception).\n #\n self._constructed = True\n\n # populate all other indices with default data\n # (avoids calling _set_contains on self._index at runtime)\n if self._dense_initialize:\n self.to_dense_data()\n timer.report()\n\n def reconstruct(self, data=None):\n \"\"\"\n Reconstruct this parameter object. This is particularly useful\n for cases where an initialize rule is provided. An initialize\n rule can return an expression that is a function of other\n parameters, so reconstruction can account for changes in dependent\n parameters.\n\n Only mutable parameters can be reconstructed. 
Otherwise, the\n changes would not be propagated into expressions in objectives\n or constraints.\n \"\"\"\n if not self._mutable:\n raise RuntimeError(\n \"Cannot invoke reconstruct method of immutable Param %s\"\n % (self.name,))\n IndexedComponent.reconstruct(self, data=data)\n\n def _pprint(self):\n \"\"\"\n Return data that will be printed for this component.\n \"\"\"\n if self._default_val is _NotValid:\n default = \"None\" # for backwards compatibility in reporting\n elif type(self._default_val) is types.FunctionType:\n default = \"(function)\"\n else:\n default = str(self._default_val)\n if self._mutable or not self.is_indexed():\n dataGen = lambda k, v: [ v._value, ]\n else:\n dataGen = lambda k, v: [ v, ]\n return ( [(\"Size\", len(self)),\n (\"Index\", self._index if self.is_indexed() else None),\n (\"Domain\", self.domain.name),\n (\"Default\", default),\n (\"Mutable\", self._mutable),\n ],\n self.sparse_iteritems(),\n (\"Value\",),\n dataGen,\n )\n\n\nclass SimpleParam(_ParamData, Param):\n\n def __init__(self, *args, **kwds):\n Param.__init__(self, *args, **kwds)\n _ParamData.__init__(self, component=self)\n\n #\n # Since this class derives from Component and Component.__getstate__\n # just packs up the entire __dict__ into the state dict, there s\n # nothng special that we need to do here. We will just defer to the\n # super() get/set state. Since all of our get/set state methods\n # rely on super() to traverse the MRO, this will automatically pick\n # up both the Component and Data base classes.\n #\n\n def __call__(self, exception=True):\n \"\"\"\n Return the value of this parameter.\n \"\"\"\n if self._constructed:\n if not self._data:\n if self._mutable:\n # This will trigger populating the _data dict and setting\n # the _default, if applicable\n self[None]\n else:\n # Immutable Param defaults never get added to the\n # _data dict\n return self[None]\n return super(SimpleParam, self).__call__(exception=exception)\n if exception:\n raise ValueError(\n \"Evaluating the numeric value of parameter '%s' before\\n\\t\"\n \"the Param has been constructed (there is currently no \"\n \"value to return).\" % (self.name,) )\n\n def set_value(self, value, index=NoArgumentGiven):\n if index is NoArgumentGiven:\n index = None\n if self._constructed and not self._mutable:\n _raise_modifying_immutable_error(self, index)\n if not self._data:\n self._data[index] = self\n super(SimpleParam, self).set_value(value, index)\n\n def is_constant(self):\n \"\"\"Determine if this SimpleParam is constant (and can be eliminated)\n\n Returns False if either unconstructed or mutable, as it must be kept\n in expressions (as it either doesn't have a value yet or the value\n can change later.\n \"\"\"\n return self._constructed and not self._mutable\n\n\nclass IndexedParam(Param):\n\n def __call__(self, exception=True):\n \"\"\"Compute the value of the parameter\"\"\"\n if exception:\n raise TypeError('Cannot compute the value of an indexed Param (%s)'\n % (self.name,) )\n\n", "path": "pyomo/core/base/param.py" } ]
diff --git a/pyomo/core/base/param.py b/pyomo/core/base/param.py index 5c9a943642b..f3a639f5afc 100644 --- a/pyomo/core/base/param.py +++ b/pyomo/core/base/param.py @@ -601,6 +601,7 @@ def _setitem_when_not_present(self, index, value, _check_domain=True): return value except: del self._data[index] + raise def _validate_value(self, index, value, validate_domain=True): diff --git a/pyomo/core/tests/unit/test_param.py b/pyomo/core/tests/unit/test_param.py index 56c8c333710..d3cbbf8fd06 100644 --- a/pyomo/core/tests/unit/test_param.py +++ b/pyomo/core/tests/unit/test_param.py @@ -1169,6 +1169,58 @@ def rule(model, i): return 0.0 model.p = Param(model.A, initialize=rule) + def test_param_validate(self): + """Test Param `validate` and `within` throw ValueError when not valid. + + The `within` argument will catch the ValueError, log extra information + with of an "ERROR" message, and reraise the ValueError. + + 1. Immutable Param (unindexed) + 2. Immutable Param (indexed) + 3. Immutable Param (arbitrary validation rule) + 4. Mutable Param (unindexed) + 5. Mutable Param (indexed) + 6. Mutable Param (arbitrary validation rule) + """ + def validation_rule(model, value): + """Arbitrary validation rule that always returns False.""" + return False + + # 1. Immutable Param (unindexed) + with self.assertRaisesRegex(ValueError, "Value not in parameter domain"): + m = ConcreteModel() + m.p1 = Param(initialize=-3, within=NonNegativeReals) + + # 2. Immutable Param (indexed) + with self.assertRaisesRegex(ValueError, "Value not in parameter domain"): + m = ConcreteModel() + m.A = RangeSet(1, 2) + m.p2 = Param(m.A, initialize=-3, within=NonNegativeReals) + + # 3. Immutable Param (arbitrary validation rule) + with self.assertRaisesRegex(ValueError, "Invalid parameter value"): + m = ConcreteModel() + m.p5 = Param(initialize=1, validate=validation_rule) + + # 4. Mutable Param (unindexed) + with self.assertRaisesRegex(ValueError, "Value not in parameter domain"): + m = ConcreteModel() + m.p3 = Param(within=NonNegativeReals, mutable=True) + m.p3 = -3 + + # 5. Mutable Param (indexed) + with self.assertRaisesRegex(ValueError, "Value not in parameter domain"): + m = ConcreteModel() + m.A = RangeSet(1, 2) + m.p4 = Param(m.A, within=NonNegativeReals, mutable=True) + m.p4[1] = -3 + + # 6. Mutable Param (arbitrary validation rule) + with self.assertRaisesRegex(ValueError, "Invalid parameter value"): + m = ConcreteModel() + m.p6 = Param(mutable=True, validate=validation_rule) + m.p6 = 1 + def test_get_uninitialized(self): model=AbstractModel() model.a = Param()
urllib3__urllib3-2484
Test development versions of Python
Uses the `3.11-dev` and `nightly` deadsnakes release streams to test urllib3 against in-development Python versions.
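For context, a minimal sketch of how a nox test session can be parameterized over released and pre-release CPython interpreters. The interpreter list and session body here are illustrative assumptions, not urllib3's actual configuration; on CI the pre-release interpreters (e.g. from deadsnakes' `3.11-dev` or `nightly` streams) must already be installed and on PATH for nox to find them.

```python
import nox

# Illustrative sketch only: the interpreter list is an assumption, not the
# project's real matrix. nox skips any interpreter it cannot locate unless
# told otherwise, so adding a dev version is low-risk on machines without it.
@nox.session(python=["3.9", "3.10", "3.11"])
def test(session: nox.Session) -> None:
    session.install(".", "pytest")            # install the package under test plus pytest
    session.run("python", "--version")        # confirm which interpreter actually ran
    session.run("python", "-m", "pytest", "test/")
```

Running `nox -s test` would then execute the suite once per available interpreter, which is how a dev-stream interpreter gets exercised alongside the stable ones.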
[ { "content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://github.com/pyca/cryptography\n # We use parallel mode and then combine here so that coverage.py will take\n # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py\n # and collapse them into src/urllib3/__init__.py.\n\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_python2(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Unsupported Python version\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\"git\", \"apply\", f\"{root}/ci/requests.patch\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n 
session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py" } ]
[ { "content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://github.com/pyca/cryptography\n # We use parallel mode and then combine here so that coverage.py will take\n # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py\n # and collapse them into src/urllib3/__init__.py.\n\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_python2(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Unsupported Python version\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\"git\", \"apply\", f\"{root}/ci/requests.patch\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n 
session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py" } ]
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5fecb46d9e..4b47c818c9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,6 +50,10 @@ jobs: os: ubuntu-latest experimental: false nox-session: test-3.9 + - python-version: 3.11-dev + os: ubuntu-latest + experimental: true + nox-session: test-3.11 runs-on: ${{ matrix.os }} name: ${{ fromJson('{"macos-latest":"macOS","windows-latest":"Windows","ubuntu-latest":"Ubuntu"}')[matrix.os] }} ${{ matrix.python-version }} ${{ matrix.nox-session}} @@ -70,7 +74,7 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Set Up Python - ${{ matrix.python-version }} + - name: Set Up Python (Development version) - ${{ matrix.python-version }} uses: deadsnakes/[email protected] if: endsWith(matrix.python-version, '-dev') with: diff --git a/noxfile.py b/noxfile.py index ea529abc71..459bb8f12e 100644 --- a/noxfile.py +++ b/noxfile.py @@ -57,7 +57,7 @@ def tests_impl( session.run("coverage", "xml") [email protected](python=["3.7", "3.8", "3.9", "3.10", "pypy"]) [email protected](python=["3.7", "3.8", "3.9", "3.10", "3.11", "pypy"]) def test(session: nox.Session) -> None: tests_impl(session)
mathesar-foundation__mathesar-2911
`AbstractType: Money, URI, Email` columns throw an error when summarization is applied
## Description
`Money` columns cannot be aggregated based on some other grouping column. (There is no problem if the `Money` column itself is the grouping column.)

## Expected behavior
Users should be able to aggregate a `Money` column based on another grouping column.

## To Reproduce
- Create a table and add two columns, one of which should be a `Money` column.
- Go to **Data Explorer**, select the other column first, then add the `Money` column.
- Then go to `Transformation Step` and `Summarize`.

Not every table will produce this error (refer to **Additional context**).

Screencast:

https://github.com/centerofci/mathesar/assets/64671908/518006d9-2c5c-4a19-b935-54e0714c4ccf

## Environment
 - OS: Ubuntu 22.10 (Kinetic Kudu)
 - Browser: Google Chrome
 - Browser Version: 112.0.5615.29

## Additional context
**The following table will not produce the error.**

<img width="395" alt="image" src="https://github.com/centerofci/mathesar/assets/64671908/9b14743a-dc46-487e-bce7-15f98875acdd">

**While this modification will produce the error.**

<img width="396" alt="image" src="https://github.com/centerofci/mathesar/assets/64671908/e35fcb30-e9cd-4f8a-9aaf-3b4dbdb23cd7">
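For reference, the failing aggregation is `distinct_aggregate_to_array` applied through a `summarize` transformation. A minimal sketch of the transformation spec, mirroring the shape used by the regression tests in the fix below (the column aliases are illustrative), is:

```python
# Sketch of a Data Explorer query transformation that triggers the error.
# The aggregated column ("Acquisition Price") is a Money column; aliases are illustrative.
transformation = {
    "type": "summarize",
    "spec": {
        "grouping_expressions": [
            {"input_alias": "Publication", "output_alias": "Publication"},
        ],
        "aggregation_expressions": [
            {
                "input_alias": "Acquisition Price",
                "output_alias": "Acquisition Price",
                "function": "distinct_aggregate_to_array",
            },
        ],
    },
}
```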
[ { "content": "\"\"\"\nHere we define DBFunction subclasses that are defined in terms of other DBFunction subclasses\n(these DBFunctions are packages or combinations of other DBFunctions). We do this to workaround\nMathesar filters not supporting composition.\n\"\"\"\n\nfrom abc import abstractmethod\n\nfrom db.functions import hints, base\nfrom db.types.custom.uri import URIFunction\nfrom db.types.custom.email import EMAIL_DOMAIN_NAME\n\n\nclass DBFunctionPacked(base.DBFunction):\n \"\"\"\n A DBFunction that is meant to be unpacked into another DBFunction. A way to define a DBFunction\n as a combination of DBFunctions. Its to_sa_expression method is not used. Its concrete\n implementations are expected to implement the unpack method.\n \"\"\"\n @staticmethod\n def to_sa_expression(*_):\n raise Exception(\"DBFunctionPacked.to_sa_expression should never be used.\")\n\n @abstractmethod\n def unpack(self):\n \"\"\"\n Should return a DBFunction instance with self.parameters forwarded to it. A way to define\n a DBFunction in terms of other DBFunctions.\n \"\"\"\n pass\n\n\nclass DistinctArrayAgg(DBFunctionPacked):\n \"\"\"\n These two functions together are meant to be a user-friendly alternative to plain array_agg.\n\n See: https://github.com/centerofci/mathesar/issues/2059\n \"\"\"\n id = 'distinct_aggregate_to_array'\n name = 'distinct aggregate to array'\n hints = tuple([\n hints.aggregation,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n return base.ArrayAgg([\n base.Distinct([param0]),\n ])\n\n\nclass NotNull(DBFunctionPacked):\n id = 'not_null'\n name = 'Is not null'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(1),\n hints.parameter(0, hints.any),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n return base.Not([\n base.Null([param0]),\n ])\n\n\nclass LesserOrEqual(DBFunctionPacked):\n id = 'lesser_or_equal'\n name = 'is lesser or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.all_parameters(hints.comparable),\n hints.mathesar_filter,\n hints.use_this_alias_when(\"is before or same as\", hints.point_in_time),\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Or([\n base.Lesser([param0, param1]),\n base.Equal([param0, param1]),\n ])\n\n\nclass GreaterOrEqual(DBFunctionPacked):\n id = 'greater_or_equal'\n name = 'is greater or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.all_parameters(hints.comparable),\n hints.mathesar_filter,\n hints.use_this_alias_when(\"is before or same as\", hints.point_in_time),\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Or([\n base.Greater([param0, param1]),\n base.Equal([param0, param1]),\n ])\n\n\nclass ArrayLengthEquals(DBFunctionPacked):\n id = 'array_length_equals'\n name = 'Number of elements is'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return base.Equal([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayLengthGreaterThan(DBFunctionPacked):\n id = 'array_length_greater_than'\n name = 'Number of elements is greater than'\n hints = tuple([\n 
hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return base.Greater([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayLengthLessThan(DBFunctionPacked):\n id = 'array_length_lesser_than'\n name = 'Number of elements is lesser than'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return base.Lesser([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayLengthGreaterOrEqual(DBFunctionPacked):\n id = 'array_length_greater_than_or_equal'\n name = 'Number of elements is greater than or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return GreaterOrEqual([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayLengthLessOrEqual(DBFunctionPacked):\n id = 'array_length_lesser_than_or_equal'\n name = 'Number of elements is lesser than or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return LesserOrEqual([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayNotEmpty(DBFunctionPacked):\n id = 'array_not_empty'\n name = 'Is not empty'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Greater([\n base.ArrayLength([param0, param1]),\n 0,\n ])\n\n\nclass JsonLengthEquals(DBFunctionPacked):\n id = 'json_array_length_equals'\n name = 'Number of elements is'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Equal([\n base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass JsonLengthGreaterThan(DBFunctionPacked):\n id = 'json_array_length_greater_than'\n name = 'Number of elements is greater than'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n hints.parameter(1, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Greater([\n base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass 
JsonLengthGreaterorEqual(DBFunctionPacked):\n id = 'json_array_length_greater_or_equal'\n name = 'Number of elements is greater than or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n hints.parameter(1, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return GreaterOrEqual([\n base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass JsonLengthLessThan(DBFunctionPacked):\n id = 'json_array_length_less_than'\n name = 'Number of elements is less than'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n hints.parameter(1, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Lesser([\n base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass JsonLengthLessorEqual(DBFunctionPacked):\n id = 'json_array_length_less_or_equal'\n name = 'Number of elements is less than or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n hints.parameter(1, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return LesserOrEqual([\n base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass JsonNotEmpty(DBFunctionPacked):\n id = 'json_array_not_empty'\n name = 'Is not empty'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(1),\n hints.parameter(0, hints.json_array),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n return base.Greater([\n base.JsonArrayLength([param0]),\n 0,\n ])\n\n\nclass URIAuthorityContains(DBFunctionPacked):\n id = 'uri_authority_contains'\n name = 'URI authority contains'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.uri),\n hints.parameter(1, hints.string_like),\n hints.mathesar_filter,\n ])\n depends_on = tuple([URIFunction.AUTHORITY])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Contains([\n base.ExtractURIAuthority([param0]),\n param1,\n ])\n\n\nclass URISchemeEquals(DBFunctionPacked):\n id = 'uri_scheme_equals'\n name = 'URI scheme is'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.uri),\n hints.parameter(1, hints.string_like),\n hints.mathesar_filter,\n ])\n depends_on = tuple([URIFunction.SCHEME])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Equal([\n base.ExtractURIScheme([param0]),\n param1,\n ])\n\n\nclass EmailDomainContains(DBFunctionPacked):\n id = 'email_domain_contains'\n name = 'email domain contains'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.email),\n hints.parameter(1, hints.string_like),\n hints.mathesar_filter,\n ])\n depends_on = tuple([EMAIL_DOMAIN_NAME])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Contains([\n base.ExtractEmailDomain([param0]),\n param1,\n ])\n\n\nclass EmailDomainEquals(DBFunctionPacked):\n id = 'email_domain_equals'\n name = 'email domain is'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.email),\n hints.parameter(1, hints.string_like),\n 
hints.mathesar_filter,\n ])\n depends_on = tuple([EMAIL_DOMAIN_NAME])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Equal([\n base.ExtractEmailDomain([param0]),\n param1,\n ])\n", "path": "db/functions/packed.py" } ]
[ { "content": "\"\"\"\nHere we define DBFunction subclasses that are defined in terms of other DBFunction subclasses\n(these DBFunctions are packages or combinations of other DBFunctions). We do this to workaround\nMathesar filters not supporting composition.\n\"\"\"\n\nfrom abc import abstractmethod\n\nfrom db.functions import hints, base\nfrom db.types.custom.uri import URIFunction\nfrom db.types.custom.email import EMAIL_DOMAIN_NAME\n\n\nclass DBFunctionPacked(base.DBFunction):\n \"\"\"\n A DBFunction that is meant to be unpacked into another DBFunction. A way to define a DBFunction\n as a combination of DBFunctions. Its to_sa_expression method is not used. Its concrete\n implementations are expected to implement the unpack method.\n \"\"\"\n @staticmethod\n def to_sa_expression(*_):\n raise Exception(\"DBFunctionPacked.to_sa_expression should never be used.\")\n\n @abstractmethod\n def unpack(self):\n \"\"\"\n Should return a DBFunction instance with self.parameters forwarded to it. A way to define\n a DBFunction in terms of other DBFunctions.\n \"\"\"\n pass\n\n\nclass DistinctArrayAgg(DBFunctionPacked):\n \"\"\"\n These two functions together are meant to be a user-friendly alternative to plain array_agg.\n\n See: https://github.com/centerofci/mathesar/issues/2059\n \"\"\"\n id = 'distinct_aggregate_to_array'\n name = 'distinct aggregate to array'\n hints = tuple([\n hints.aggregation,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param0 = base._maybe_downcast(param0)\n return base.ArrayAgg([\n base.Distinct([param0]),\n ])\n\n\nclass NotNull(DBFunctionPacked):\n id = 'not_null'\n name = 'Is not null'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(1),\n hints.parameter(0, hints.any),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n return base.Not([\n base.Null([param0]),\n ])\n\n\nclass LesserOrEqual(DBFunctionPacked):\n id = 'lesser_or_equal'\n name = 'is lesser or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.all_parameters(hints.comparable),\n hints.mathesar_filter,\n hints.use_this_alias_when(\"is before or same as\", hints.point_in_time),\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Or([\n base.Lesser([param0, param1]),\n base.Equal([param0, param1]),\n ])\n\n\nclass GreaterOrEqual(DBFunctionPacked):\n id = 'greater_or_equal'\n name = 'is greater or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.all_parameters(hints.comparable),\n hints.mathesar_filter,\n hints.use_this_alias_when(\"is before or same as\", hints.point_in_time),\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Or([\n base.Greater([param0, param1]),\n base.Equal([param0, param1]),\n ])\n\n\nclass ArrayLengthEquals(DBFunctionPacked):\n id = 'array_length_equals'\n name = 'Number of elements is'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return base.Equal([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayLengthGreaterThan(DBFunctionPacked):\n id = 'array_length_greater_than'\n name = 'Number of elements is 
greater than'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return base.Greater([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayLengthLessThan(DBFunctionPacked):\n id = 'array_length_lesser_than'\n name = 'Number of elements is lesser than'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return base.Lesser([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayLengthGreaterOrEqual(DBFunctionPacked):\n id = 'array_length_greater_than_or_equal'\n name = 'Number of elements is greater than or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return GreaterOrEqual([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayLengthLessOrEqual(DBFunctionPacked):\n id = 'array_length_lesser_than_or_equal'\n name = 'Number of elements is lesser than or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(3),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.parameter(2, hints.numeric),\n hints.mathesar_filter\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n param2 = self.parameters[2]\n return LesserOrEqual([\n base.ArrayLength([param0, param1]),\n param2\n ])\n\n\nclass ArrayNotEmpty(DBFunctionPacked):\n id = 'array_not_empty'\n name = 'Is not empty'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Greater([\n base.ArrayLength([param0, param1]),\n 0,\n ])\n\n\nclass JsonLengthEquals(DBFunctionPacked):\n id = 'json_array_length_equals'\n name = 'Number of elements is'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n # TODO any is too generic\n hints.parameter(1, hints.any),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Equal([\n base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass JsonLengthGreaterThan(DBFunctionPacked):\n id = 'json_array_length_greater_than'\n name = 'Number of elements is greater than'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n hints.parameter(1, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Greater([\n 
base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass JsonLengthGreaterorEqual(DBFunctionPacked):\n id = 'json_array_length_greater_or_equal'\n name = 'Number of elements is greater than or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n hints.parameter(1, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return GreaterOrEqual([\n base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass JsonLengthLessThan(DBFunctionPacked):\n id = 'json_array_length_less_than'\n name = 'Number of elements is less than'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n hints.parameter(1, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Lesser([\n base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass JsonLengthLessorEqual(DBFunctionPacked):\n id = 'json_array_length_less_or_equal'\n name = 'Number of elements is less than or equal to'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.json_array),\n hints.parameter(1, hints.numeric),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return LesserOrEqual([\n base.JsonArrayLength([param0]),\n param1,\n ])\n\n\nclass JsonNotEmpty(DBFunctionPacked):\n id = 'json_array_not_empty'\n name = 'Is not empty'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(1),\n hints.parameter(0, hints.json_array),\n hints.mathesar_filter,\n ])\n\n def unpack(self):\n param0 = self.parameters[0]\n return base.Greater([\n base.JsonArrayLength([param0]),\n 0,\n ])\n\n\nclass URIAuthorityContains(DBFunctionPacked):\n id = 'uri_authority_contains'\n name = 'URI authority contains'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.uri),\n hints.parameter(1, hints.string_like),\n hints.mathesar_filter,\n ])\n depends_on = tuple([URIFunction.AUTHORITY])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Contains([\n base.ExtractURIAuthority([param0]),\n param1,\n ])\n\n\nclass URISchemeEquals(DBFunctionPacked):\n id = 'uri_scheme_equals'\n name = 'URI scheme is'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.uri),\n hints.parameter(1, hints.string_like),\n hints.mathesar_filter,\n ])\n depends_on = tuple([URIFunction.SCHEME])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Equal([\n base.ExtractURIScheme([param0]),\n param1,\n ])\n\n\nclass EmailDomainContains(DBFunctionPacked):\n id = 'email_domain_contains'\n name = 'email domain contains'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, hints.email),\n hints.parameter(1, hints.string_like),\n hints.mathesar_filter,\n ])\n depends_on = tuple([EMAIL_DOMAIN_NAME])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Contains([\n base.ExtractEmailDomain([param0]),\n param1,\n ])\n\n\nclass EmailDomainEquals(DBFunctionPacked):\n id = 'email_domain_equals'\n name = 'email domain is'\n hints = tuple([\n hints.returns(hints.boolean),\n hints.parameter_count(2),\n hints.parameter(0, 
hints.email),\n hints.parameter(1, hints.string_like),\n hints.mathesar_filter,\n ])\n depends_on = tuple([EMAIL_DOMAIN_NAME])\n\n def unpack(self):\n param0 = self.parameters[0]\n param1 = self.parameters[1]\n return base.Equal([\n base.ExtractEmailDomain([param0]),\n param1,\n ])\n", "path": "db/functions/packed.py" } ]
diff --git a/db/functions/packed.py b/db/functions/packed.py index b1620a9b06..8c8f46b2f4 100644 --- a/db/functions/packed.py +++ b/db/functions/packed.py @@ -44,6 +44,7 @@ class DistinctArrayAgg(DBFunctionPacked): def unpack(self): param0 = self.parameters[0] + param0 = base._maybe_downcast(param0) return base.ArrayAgg([ base.Distinct([param0]), ]) diff --git a/db/tests/resources/library_without_checkouts.sql b/db/tests/resources/library_without_checkouts.sql index f275c5a733..606e973858 100644 --- a/db/tests/resources/library_without_checkouts.sql +++ b/db/tests/resources/library_without_checkouts.sql @@ -255,7 +255,7 @@ INSERT INTO "Items" (id, "Acquisition Date", "Acquisition Price", "Publication") (107, '1990-09-16', 11.42, 4), (106, '2014-03-21', 13.55, 4), (108, '2008-01-30', 12.08, 6), -(105, '2008-10-29', 13.43, 7), +(105, '2008-10-29', 4.66, 7), (104, '2010-07-09', 4.66, 7), (102, '1995-02-03', 0.10, 8), (19, '1984-11-04', 1.75, 10), diff --git a/mathesar/tests/api/query/test_aggregation_functions.py b/mathesar/tests/api/query/test_aggregation_functions.py new file mode 100644 index 0000000000..a5888c5baf --- /dev/null +++ b/mathesar/tests/api/query/test_aggregation_functions.py @@ -0,0 +1,771 @@ +display_option_origin = "display_option_origin" + + +def test_Mathesar_money_distinct_list_aggregation(library_ma_tables, get_uid, client): + _ = library_ma_tables + items = { + t["name"]: t for t in client.get("/api/db/v0/tables/").json()["results"] + }["Items"] + columns = { + c["name"]: c for c in items["columns"] + } + request_data = { + "name": get_uid(), + "base_table": items["id"], + "initial_columns": [ + {"id": columns["Publication"]["id"], "alias": "Publication"}, + {"id": columns["Acquisition Price"]["id"], "alias": "Acquisition Price"}, + ], + "display_names": { + "Acquisition Price": "Price", + "Publication": "Publication", + }, + "display_options": { + "Publication": { + display_option_origin: "Publication", + }, + "Acquisition Price": { + display_option_origin: "Acquisition Price", + }, + }, + "transformations": [ + { + "spec": { + "grouping_expressions": [ + { + "input_alias": "Publication", + "output_alias": "Publication", + } + ], + "aggregation_expressions": [ + { + "input_alias": "Acquisition Price", + "output_alias": "Acquisition Price", + "function": "distinct_aggregate_to_array" + } + ] + }, + "type": "summarize", + } + ] + } + response = client.post('/api/db/v0/queries/', data=request_data) + assert response.status_code == 201 + query_id = response.json()['id'] + expect_records = [ + { + "Publication": 1, + "Acquisition Price": [ + 0.59 + ] + }, + { + "Publication": 2, + "Acquisition Price": [ + 6.09 + ] + }, + { + "Publication": 3, + "Acquisition Price": [ + 3.89 + ] + }, + { + "Publication": 4, + "Acquisition Price": [ + 11.42, + 13.55 + ] + }, + { + "Publication": 5, + "Acquisition Price": [ + 10.75 + ] + }, + { + "Publication": 6, + "Acquisition Price": [ + 12.08 + ] + }, + { + "Publication": 7, + "Acquisition Price": [ + 4.66 + ] + }, + { + "Publication": 8, + "Acquisition Price": [ + 0.1 + ] + }, + { + "Publication": 9, + "Acquisition Price": [ + 11.05, + 14.94 + ] + }, + { + "Publication": 10, + "Acquisition Price": [ + 1.75, + 3.88 + ] + }, + { + "Publication": 11, + "Acquisition Price": [ + 4.8 + ] + }, + { + "Publication": 12, + "Acquisition Price": [ + 1.31 + ] + }, + { + "Publication": 13, + "Acquisition Price": [ + 2.06, + 7.77 + ] + }, + { + "Publication": 14, + "Acquisition Price": [ + 8.26 + ] + }, + { + "Publication": 15, + "Acquisition Price": [ 
+ 3.09, + 3.73, + 3.76, + 9.6, + 11.77, + 13.06 + ] + }, + { + "Publication": 16, + "Acquisition Price": [ + 4.28 + ] + }, + { + "Publication": 17, + "Acquisition Price": [ + 2.03, + 3.23 + ] + }, + { + "Publication": 18, + "Acquisition Price": [ + 3.62, + 5.45, + 9.77, + 10.78 + ] + }, + { + "Publication": 19, + "Acquisition Price": [ + 9.55 + ] + }, + { + "Publication": 20, + "Acquisition Price": [ + 0.16, + 5.28 + ] + }, + { + "Publication": 21, + "Acquisition Price": [ + 5.29 + ] + }, + { + "Publication": 22, + "Acquisition Price": [ + 8.91, + 12.06, + 14.76 + ] + }, + { + "Publication": 23, + "Acquisition Price": [ + 4.69, + 14.48 + ] + }, + { + "Publication": 24, + "Acquisition Price": [ + 2.08, + 4.52, + 12.53 + ] + }, + { + "Publication": 25, + "Acquisition Price": [ + 7.45, + 10.39 + ] + }, + { + "Publication": 26, + "Acquisition Price": [ + 3.36, + 14.59 + ] + }, + { + "Publication": 27, + "Acquisition Price": [ + 1.12 + ] + }, + { + "Publication": 28, + "Acquisition Price": [ + 3.18, + 12.24 + ] + }, + { + "Publication": 29, + "Acquisition Price": [ + 10.6 + ] + }, + { + "Publication": 30, + "Acquisition Price": [ + 6.38 + ] + }, + { + "Publication": 31, + "Acquisition Price": [ + 8.47 + ] + }, + { + "Publication": 32, + "Acquisition Price": [ + 2.11 + ] + }, + { + "Publication": 33, + "Acquisition Price": [ + 2.77 + ] + }, + { + "Publication": 34, + "Acquisition Price": [ + 9.23, + 10.27, + 10.82, + 12.35, + 12.78 + ] + }, + { + "Publication": 35, + "Acquisition Price": [ + 8.25 + ] + }, + { + "Publication": 36, + "Acquisition Price": [ + 12.79, + 12.98, + 13.96 + ] + }, + { + "Publication": 37, + "Acquisition Price": [ + 1.88, + 5.57, + 10.81, + 13.37 + ] + }, + { + "Publication": 38, + "Acquisition Price": [ + 12.01 + ] + }, + { + "Publication": 39, + "Acquisition Price": [ + 3.17 + ] + }, + { + "Publication": 40, + "Acquisition Price": [ + 2.73, + 10.1 + ] + }, + { + "Publication": 41, + "Acquisition Price": [ + 10.55, + 13.57 + ] + }, + { + "Publication": 42, + "Acquisition Price": [ + 8.31, + 9.27, + 11.83 + ] + }, + { + "Publication": 43, + "Acquisition Price": [ + 6.63, + 13.27 + ] + }, + { + "Publication": 44, + "Acquisition Price": [ + 5.14 + ] + }, + { + "Publication": 45, + "Acquisition Price": [ + 7.21 + ] + }, + { + "Publication": 46, + "Acquisition Price": [ + 13.85 + ] + }, + { + "Publication": 47, + "Acquisition Price": [ + 10.93, + 10.99 + ] + }, + { + "Publication": 48, + "Acquisition Price": [ + 4.02, + 6.41, + 9.6, + 10.83, + 14.32 + ] + }, + { + "Publication": 49, + "Acquisition Price": [ + 5.74, + 6.66, + 13.08 + ] + }, + { + "Publication": 50, + "Acquisition Price": [ + 6.97, + 13.75 + ] + } + ] + actual_records = client.get(f'/api/db/v0/queries/{query_id}/records/').json()['results'] + assert sorted(actual_records, key=lambda x: x['Publication']) == expect_records + + +def test_Mathesar_URI_distinct_list_aggregation(library_ma_tables, get_uid, client): + _ = library_ma_tables + authors = { + t["name"]: t for t in client.get("/api/db/v0/tables/").json()["results"] + }["Authors"] + columns = { + c["name"]: c for c in authors["columns"] + } + request_data = { + "name": get_uid(), + "base_table": authors["id"], + "initial_columns": [ + {"id": columns["Author Last Name"]["id"], "alias": "Author Last Name"}, + {"id": columns["Author Website"]["id"], "alias": "Author Website"}, + ], + "display_names": { + "Author Last Name": "Author Last Name", + "Website": "Website", + }, + "display_options": { + "Author Last Name": { + display_option_origin: "Author Last 
Name", + }, + "Author Website": { + display_option_origin: "Author Website", + }, + }, + "transformations": [ + { + "spec": { + "grouping_expressions": [ + { + "input_alias": "Author Last Name", + "output_alias": "Author Last Name", + } + ], + "aggregation_expressions": [ + { + "input_alias": "Author Website", + "output_alias": "Website", + "function": "distinct_aggregate_to_array", + } + ] + }, + "type": "summarize", + } + ] + } + response = client.post('/api/db/v0/queries/', data=request_data) + assert response.status_code == 201 + query_id = response.json()['id'] + + expect_records = [ + { + "Author Last Name": "Castillo", + "Website": [ + "https://jennifercastillo.com" + ] + }, + { + "Author Last Name": "Diaz", + "Website": [ + "https://diaz.net" + ] + }, + { + "Author Last Name": "Dunlap", + "Website": [ + "https://dunlap.com" + ] + }, + { + "Author Last Name": "Edwards", + "Website": [ + "https://catherineedwards.com", + "https://edwards.info" + ] + }, + { + "Author Last Name": "Evans", + "Website": [ + "https://bonnieevans.com" + ] + }, + { + "Author Last Name": "Harris", + "Website": [ + "http://harris.info" + ] + }, + { + "Author Last Name": "Herrera", + "Website": [ + None + ] + }, + { + "Author Last Name": "Jensen", + "Website": [ + "http://hannahjensen.org" + ] + }, + { + "Author Last Name": "Johnson", + "Website": [ + "https://kimberlyjohnson.net" + ] + }, + { + "Author Last Name": "Medina", + "Website": [ + None + ] + }, + { + "Author Last Name": "Munoz", + "Website": [ + "https://munoz.com" + ] + }, + { + "Author Last Name": "Newman", + "Website": [ + None + ] + }, + { + "Author Last Name": "Robinson", + "Website": [ + "https://seanrobinson.com" + ] + } + ] + actual_records = client.get(f'/api/db/v0/queries/{query_id}/records/').json()['results'] + assert sorted(actual_records, key=lambda x: x['Author Last Name']) == expect_records + + +def test_Mathesar_Email_distinct_list_aggregation(library_ma_tables, get_uid, client): + _ = library_ma_tables + patrons = { + t["name"]: t for t in client.get("/api/db/v0/tables/").json()["results"] + }["Patrons"] + columns = { + c["name"]: c for c in patrons["columns"] + } + request_data = { + "name": get_uid(), + "base_table": patrons["id"], + "initial_columns": [ + {"id": columns["First Name"]["id"], "alias": "First Name"}, + {"id": columns["Email"]["id"], "alias": "Email"}, + ], + "display_names": { + "First Name": "First Name", + "Email": "Email", + }, + "display_options": { + "First Name": { + display_option_origin: "First Name", + }, + "Email": { + display_option_origin: "Email", + }, + }, + "transformations": [ + { + "spec": { + "grouping_expressions": [ + { + "input_alias": "First Name", + "output_alias": "First Name", + } + ], + "aggregation_expressions": [ + { + "input_alias": "Email", + "output_alias": "Email", + "function": "distinct_aggregate_to_array", + } + ] + }, + "type": "summarize", + } + ] + } + response = client.post('/api/db/v0/queries/', data=request_data) + assert response.status_code == 201 + query_id = response.json()['id'] + expect_records = [ + { + "First Name": "Alexander", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Andrew", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Autumn", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Barry", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Benjamin", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Calvin", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Connor", + 
"Email": [ + "[email protected]" + ] + }, + { + "First Name": "Deanna", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Eduardo", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Harry", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Heather", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Jason", + "Email": [ + "[email protected]", + "[email protected]" + ] + }, + { + "First Name": "Jennifer", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Jesse", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Joshua", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Kathy", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Kristen", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Laura", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Lori", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Luke", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Mary", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Nicole", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Patrick", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Rita", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Toni", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Traci", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Tyler", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Walter", + "Email": [ + "[email protected]" + ] + }, + { + "First Name": "Yvonne", + "Email": [ + "[email protected]" + ] + } + ] + actual_records = client.get(f'/api/db/v0/queries/{query_id}/records/').json()['results'] + assert sorted(actual_records, key=lambda x: x['First Name']) == expect_records
pyjanitor-devs__pyjanitor-289
Utilize autosummary Sphinx directive in API Reference
## Proposal
A consolidated list of functionality would go a long way in [our API Reference](https://pyjanitor.readthedocs.io/api.html) section. Other libraries have leveraged the [autosummary](http://www.sphinx-doc.org/en/master/usage/extensions/autosummary.html#directive-autosummary) Sphinx directive to achieve this to great effect. For instance:

* Pandas: [Docs](https://pandas.pydata.org/pandas-docs/stable/reference/indexing.html), [Raw](https://raw.githubusercontent.com/pandas-dev/pandas/master/doc/source/reference/indexing.rst)
* Matplotlib: [Docs](https://matplotlib.org/api/axes_api.html), [Raw](https://matplotlib.org/_sources/api/axes_api.rst.txt)

## Implementation Details
Apart from rolling `sphinx.ext.autosummary` into the `conf.py`, this would also involve going through and enumerating the different functions in the `api.rst` documentation.

A concern here, though: this would mean that all future feature introductions would have to get appended to the lists in these files, **which necessitates adding this step to the PR checklist**... Until someone figures out a more programmatic way to do this, anyhow 😉
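For illustration, a minimal sketch of the kind of autosummary listing this would add to the reference pages (function names are taken from the current API; the grouping and toctree path are only suggestions):

```rst
=================
General functions
=================
.. currentmodule:: janitor

.. autosummary::
   :toctree: janitor.functions/

   clean_names
   remove_empty
   coalesce
```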
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\nsys.path.insert(0, os.path.abspath(\"../examples\"))\n\n# Make a symlink in our sphinx source directory to the top-level\n# examples/notebooks directory so we can include notebooks in the doc\nnotebooks = Path(\"./notebooks\")\nif not notebooks.exists():\n print(\"Making symlink to ../examples/notebooks\")\n notebooks.symlink_to(\"../examples/notebooks\")\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pyjanitor\"\ncopyright = \"2018, Eric J. Ma\"\nauthor = \"Eric J. Ma\"\n\n# The short X.Y version\nversion = \"0.1.0\"\n# The full version, including alpha/beta/rc tags\nrelease = \"\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinxcontrib.fulltoc\",\n \"nbsphinx\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".md\", \".rst\", \".ipynb\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"logo\": \"logo_title.svg\"}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n \"**\": [\"about.html\", \"navigation.html\", \"relations.html\", \"searchbox.html\"]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pyjanitordoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"pyjanitor.tex\",\n \"pyjanitor Documentation\",\n \"Eric J. Ma\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyjanitor\", \"pyjanitor Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pyjanitor\",\n \"pyjanitor Documentation\",\n author,\n \"pyjanitor\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\nsys.path.insert(0, os.path.abspath(\"../examples\"))\n\n# Make a symlink in our sphinx source directory to the top-level\n# examples/notebooks directory so we can include notebooks in the doc\nnotebooks = Path(\"./notebooks\")\nif not notebooks.exists():\n print(\"Making symlink to ../examples/notebooks\")\n notebooks.symlink_to(\"../examples/notebooks\")\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pyjanitor\"\ncopyright = \"2018, Eric J. Ma\"\nauthor = \"Eric J. Ma\"\n\n# The short X.Y version\nversion = \"0.1.0\"\n# The full version, including alpha/beta/rc tags\nrelease = \"\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinxcontrib.fulltoc\",\n \"nbsphinx\",\n \"sphinx.ext.autosummary\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".md\", \".rst\", \".ipynb\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"logo\": \"logo_title.svg\"}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n \"**\": [\"about.html\", \"navigation.html\", \"relations.html\", \"searchbox.html\"]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pyjanitordoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"pyjanitor.tex\",\n \"pyjanitor Documentation\",\n \"Eric J. Ma\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyjanitor\", \"pyjanitor Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pyjanitor\",\n \"pyjanitor Documentation\",\n author,\n \"pyjanitor\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py" } ]
diff --git a/AUTHORS.rst b/AUTHORS.rst index e10a37ef5..304879bcc 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -29,5 +29,6 @@ With thanks to the following contributors: - `@jack-kessler-88 <https://github.com/jack-kessler-88>`_ - `@NapsterInBlue <https://github.com/NapsterInBlue>`_ - `@jk3587 <https://github.com/jk3587>`_ +- `@ricky-lim<https://github.com/ricky-lim>`_ - `@catherinedevlin <https://github.com/catherinedevlin>`_ - `@StephenSchroed <https://github.com/StephenSchroeder>`_ diff --git a/docs/conf.py b/docs/conf.py index 8ed8f8d99..8fed08b42 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -58,6 +58,7 @@ "sphinx.ext.githubpages", "sphinxcontrib.fulltoc", "nbsphinx", + "sphinx.ext.autosummary", ] # Add any paths that contain templates here, relative to this directory. diff --git a/docs/general_functions.rst b/docs/general_functions.rst new file mode 100644 index 000000000..2593f08bf --- /dev/null +++ b/docs/general_functions.rst @@ -0,0 +1,51 @@ +================= +General functions +================= +.. currentmodule:: janitor + +Modify columns +~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: janitor.functions/ + + expand_column + concatenate_columns + deconcatenate_column + remove_columns + add_column + add_columns + transform_column + transform_columns + rename_column + reorder_columns + reset_index_inplace + collapse_levels + change_type + limit_column_characters + row_to_names + clean_names + +Modify values +~~~~~~~~~~~~~ +.. autosummary:: + :toctree: janitor.functions/ + + fill_empty + convert_excel_date + convert_matlab_date + convert_unix_date + remove_empty + coalesce + find_replace + dropnotnull + update_where + +Preprocessing +~~~~~~~~~~~~~ +.. autosummary:: + :toctree: janitor.functions/ + + min_max_scale + impute + label_encode + encode_categorical
mars-project__mars-1631
[BUG] `Cannot find serializable class` raised when calling set_label When calling `DataFrame.columns = xxxx` in a distributed environment, a KeyError is raised with the message `Cannot find serializable class for type_id 1517314310`.
[ { "content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef _install():\n from pandas.util import cache_readonly\n from ..operands import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE\n from .at import at\n from .getitem import dataframe_getitem, series_getitem\n from .iat import iat\n from .iloc import iloc, head, tail\n from .insert import df_insert\n from .loc import loc\n from .rename import df_rename, series_rename, index_rename\n from .reset_index import df_reset_index, series_reset_index\n from .set_index import set_index\n from .setitem import dataframe_setitem\n from .reindex import reindex\n from .where import mask, where\n\n for cls in DATAFRAME_TYPE + SERIES_TYPE:\n setattr(cls, 'iloc', cache_readonly(iloc))\n setattr(cls, 'loc', cache_readonly(loc))\n setattr(cls, 'iat', cache_readonly(iat))\n setattr(cls, 'at', cache_readonly(at))\n setattr(cls, 'reindex', reindex)\n setattr(cls, 'head', head)\n setattr(cls, 'tail', tail)\n setattr(cls, 'mask', mask)\n setattr(cls, 'where', where)\n\n for cls in DATAFRAME_TYPE:\n setattr(cls, 'set_index', set_index)\n setattr(cls, '__getitem__', dataframe_getitem)\n setattr(cls, '__setitem__', dataframe_setitem)\n setattr(cls, 'insert', df_insert)\n setattr(cls, 'reset_index', df_reset_index)\n setattr(cls, 'rename', df_rename)\n\n for cls in SERIES_TYPE:\n setattr(cls, '__getitem__', series_getitem)\n setattr(cls, 'reset_index', series_reset_index)\n setattr(cls, 'rename', series_rename)\n\n for cls in INDEX_TYPE:\n setattr(cls, 'rename', index_rename)\n\n\n_install()\ndel _install\n", "path": "mars/dataframe/indexing/__init__.py" } ]
[ { "content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef _install():\n from pandas.util import cache_readonly\n from ..operands import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE\n from .at import at\n from .getitem import dataframe_getitem, series_getitem\n from .iat import iat\n from .iloc import iloc, head, tail\n from .insert import df_insert\n from .loc import loc\n from .rename import df_rename, series_rename, index_rename\n from .reset_index import df_reset_index, series_reset_index\n from .set_index import set_index\n from .setitem import dataframe_setitem\n from .reindex import reindex\n from .where import mask, where\n\n for cls in DATAFRAME_TYPE + SERIES_TYPE:\n setattr(cls, 'iloc', cache_readonly(iloc))\n setattr(cls, 'loc', cache_readonly(loc))\n setattr(cls, 'iat', cache_readonly(iat))\n setattr(cls, 'at', cache_readonly(at))\n setattr(cls, 'reindex', reindex)\n setattr(cls, 'head', head)\n setattr(cls, 'tail', tail)\n setattr(cls, 'mask', mask)\n setattr(cls, 'where', where)\n\n for cls in DATAFRAME_TYPE:\n setattr(cls, 'set_index', set_index)\n setattr(cls, '__getitem__', dataframe_getitem)\n setattr(cls, '__setitem__', dataframe_setitem)\n setattr(cls, 'insert', df_insert)\n setattr(cls, 'reset_index', df_reset_index)\n setattr(cls, 'rename', df_rename)\n\n for cls in SERIES_TYPE:\n setattr(cls, '__getitem__', series_getitem)\n setattr(cls, 'reset_index', series_reset_index)\n setattr(cls, 'rename', series_rename)\n\n for cls in INDEX_TYPE:\n setattr(cls, 'rename', index_rename)\n\n # make sure operand is registered\n from .set_label import DataFrameSetLabel\n del DataFrameSetLabel\n\n\n_install()\ndel _install\n", "path": "mars/dataframe/indexing/__init__.py" } ]
diff --git a/.github/workflows/install-minikube.sh b/.github/workflows/install-minikube.sh index fe7680d21d..81e90c92d6 100755 --- a/.github/workflows/install-minikube.sh +++ b/.github/workflows/install-minikube.sh @@ -3,9 +3,18 @@ set -e export CHANGE_MINIKUBE_NONE_USER=true sudo apt-get -q update || true -sudo apt-get install -yq conntrack +sudo apt-get install -yq conntrack jq + +get_latest_release() { + curl --silent "https://api.github.com/repos/$1/releases" | + jq -c '[.[] | select(.prerelease == false)][0].tag_name' | + sed -E 's/.*"([^"]+)".*/\1/' +} K8S_VERSION=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt) +if [[ "$K8S_VERSION" == *"alpha"* ]] || [[ "$K8S_VERSION" == *"beta"* ]] || [[ "$K8S_VERSION" == *"rc"* ]]; then + K8S_VERSION=$(get_latest_release "kubernetes/kubernetes") +fi curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$K8S_VERSION/bin/linux/amd64/kubectl && \ chmod +x kubectl && sudo mv kubectl /usr/local/bin/ diff --git a/mars/dataframe/indexing/__init__.py b/mars/dataframe/indexing/__init__.py index c04ea28ff9..e33a0d96a2 100644 --- a/mars/dataframe/indexing/__init__.py +++ b/mars/dataframe/indexing/__init__.py @@ -56,6 +56,10 @@ def _install(): for cls in INDEX_TYPE: setattr(cls, 'rename', index_rename) + # make sure operand is registered + from .set_label import DataFrameSetLabel + del DataFrameSetLabel + _install() del _install diff --git a/mars/serialize/core.pxd b/mars/serialize/core.pxd index cae681b085..651de81a6a 100644 --- a/mars/serialize/core.pxd +++ b/mars/serialize/core.pxd @@ -263,6 +263,7 @@ cpdef enum ProviderType: cpdef object get_serializable_by_index(object index) +cpdef dict get_serializables() cdef class Provider: diff --git a/mars/serialize/core.pyx b/mars/serialize/core.pyx index cf06496dd1..a2b5717587 100644 --- a/mars/serialize/core.pyx +++ b/mars/serialize/core.pyx @@ -786,6 +786,10 @@ cpdef object get_serializable_by_index(object index): return _serializable_registry.get(index) +cpdef dict get_serializables(): + return _serializable_registry.copy() + + cdef class Provider: cpdef serialize_field(self, Field field, model_instance, obj): raise NotImplementedError diff --git a/mars/tests/core.py b/mars/tests/core.py index e51e6dd62b..753ff7efd4 100644 --- a/mars/tests/core.py +++ b/mars/tests/core.py @@ -36,6 +36,7 @@ from mars.optimizes.chunk_graph.fuse import Fusion from mars.serialize import serializes, deserializes, \ ProtobufSerializeProvider, JsonSerializeProvider +from mars.serialize.core import get_serializables from mars.utils import lazy_import try: @@ -612,6 +613,8 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._raw_chunk_shapes = dict() self._tileable_checked = dict() + if not hasattr(type(self), '_serializables_snapshot'): + type(self)._serializables_snapshot = get_serializables() @staticmethod def _extract_check_options(kw_dict): @@ -682,6 +685,14 @@ def _update_tileable_and_chunk_shape(self, tileable_graph, chunk_result, failed_ self._tileable_checked[n.key] = True return super()._update_tileable_and_chunk_shape(tileable_graph, chunk_result, failed_ops) + def _check_serializable_registration(self): + cur_serializables = get_serializables() + if len(cur_serializables) == len(self._serializables_snapshot): + return + unregistered_set = set(cur_serializables.keys()) - set(self._serializables_snapshot.keys()) + raise AssertionError('Operands %r not registered on initialization' + % ([cur_serializables[k] for k in unregistered_set],)) + def 
execute_tileable(self, tileable, *args, **kwargs): self._extract_check_options(kwargs) @@ -695,6 +706,7 @@ def execute_tileable(self, tileable, *args, **kwargs): # check returned type if kwargs.get('concat', False): self.assert_object_consistent(tileable, result[0]) + self._check_serializable_registration() return result execute_tensor = execute_tileable @@ -716,6 +728,7 @@ def execute_tileables(self, tileables, *args, **kwargs): if _check_options['check_nsplits']: self._check_nsplits(tileable) self.assert_object_consistent(tileable, result) + self._check_serializable_registration() return results def fetch_tileables(self, tileables, **kw):
tiangolo__fastapi-9468
FastAPI tests in pydantic failing due to flask deprecation ### Privileged issue - [X] I'm @tiangolo or he asked me directly to create an issue here. ### Issue Content hope you don't mind me creating an issue, pydantic's 1.10.X tests are failing due to a new issue with running our fastapi tests, see https://github.com/pydantic/pydantic/actions/runs/4832692304/jobs/8611783607?pr=5628 output from pydantic's tests: ``` ==================================== ERRORS ==================================== ______ ERROR collecting tests/test_tutorial/test_wsgi/test_tutorial001.py ______ tests/test_tutorial/test_wsgi/test_tutorial001.py:3: in <module> from docs_src.wsgi.tutorial001 import app docs_src/wsgi/tutorial001.py:3: in <module> from flask import Flask, escape, request <frozen importlib._bootstrap>:1075: in _handle_fromlist ??? /opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/flask/__init__.py:71: in __getattr__ warnings.warn( E DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape' instead. =========================== short test summary info ============================ ERROR tests/test_tutorial/test_wsgi/test_tutorial001.py - DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape' ``` related to https://github.com/pydantic/pydantic/pull/5628
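A minimal sketch of the import swap the deprecation warning asks for (assuming only that `markupsafe` is installed, which it already is as a Flask dependency; `flask.escape` is a re-export of `markupsafe.escape`):

```python
# Sketch of the replacement suggested by the warning; not the project's tutorial file itself.
from markupsafe import escape  # instead of: from flask import escape

name = "<World>"
print(f"Hello, {escape(name)}!")  # Hello, &lt;World&gt;!
```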
[ { "content": "from fastapi import FastAPI\nfrom fastapi.middleware.wsgi import WSGIMiddleware\nfrom flask import Flask, escape, request\n\nflask_app = Flask(__name__)\n\n\n@flask_app.route(\"/\")\ndef flask_main():\n name = request.args.get(\"name\", \"World\")\n return f\"Hello, {escape(name)} from Flask!\"\n\n\napp = FastAPI()\n\n\[email protected](\"/v2\")\ndef read_main():\n return {\"message\": \"Hello World\"}\n\n\napp.mount(\"/v1\", WSGIMiddleware(flask_app))\n", "path": "docs_src/wsgi/tutorial001.py" } ]
[ { "content": "from fastapi import FastAPI\nfrom fastapi.middleware.wsgi import WSGIMiddleware\nfrom flask import Flask, request\nfrom markupsafe import escape\n\nflask_app = Flask(__name__)\n\n\n@flask_app.route(\"/\")\ndef flask_main():\n name = request.args.get(\"name\", \"World\")\n return f\"Hello, {escape(name)} from Flask!\"\n\n\napp = FastAPI()\n\n\[email protected](\"/v2\")\ndef read_main():\n return {\"message\": \"Hello World\"}\n\n\napp.mount(\"/v1\", WSGIMiddleware(flask_app))\n", "path": "docs_src/wsgi/tutorial001.py" } ]
diff --git a/docs/en/docs/advanced/wsgi.md b/docs/en/docs/advanced/wsgi.md index df8865961721d..cfe3c78c11ca4 100644 --- a/docs/en/docs/advanced/wsgi.md +++ b/docs/en/docs/advanced/wsgi.md @@ -12,7 +12,7 @@ Then wrap the WSGI (e.g. Flask) app with the middleware. And then mount that under a path. -```Python hl_lines="2-3 22" +```Python hl_lines="2-3 23" {!../../../docs_src/wsgi/tutorial001.py!} ``` diff --git a/docs_src/wsgi/tutorial001.py b/docs_src/wsgi/tutorial001.py index 500ecf883eaf6..7f27a85a19ae7 100644 --- a/docs_src/wsgi/tutorial001.py +++ b/docs_src/wsgi/tutorial001.py @@ -1,6 +1,7 @@ from fastapi import FastAPI from fastapi.middleware.wsgi import WSGIMiddleware -from flask import Flask, escape, request +from flask import Flask, request +from markupsafe import escape flask_app = Flask(__name__)
bookwyrm-social__bookwyrm-2418
Running behind a proxy produces wrong embedding URLs I was a little bit confused that the URLs for embedding were http instead of https. After looking up where that comes from https://github.com/bookwyrm-social/bookwyrm/blob/fdc477afdf4002ea432741121024ea75dbc68192/bookwyrm/views/list/list.py#L53 `build_absolute_uri()` will always generate an absolute URI with the same scheme the current request has - as the traffic comes via http from the proxy it will always build an http:// URL. I was confused because my proxy is sending the correct headers in the request. I learned that Django needs to be configured to trust those headers. This is done by adding `SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')` to `settings.py`. You can read about it here: https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header **As this can be a security issue I would not recommend adding it to `settings.py` by default, but I recommend adding an option to .env so that it can be set there and survives an update.** I have it in my `settings.py` at the moment and I am prepared to put it back there every time, but one of these updates I will forget it and get confused. As my setup forwards _every_ request to https, it is no problem if someone takes the `http` URL by accident. I know servers which do not forward to `https` by default, and sometimes admins do not care, as port 80 is just serving nothing. Who knows what is coming in the future. I have seen the first servers that no longer have a port 80 at all - not even a redirect. I think it is just a question of time until we see more and more SSL-only setups. Long story short: please take it into consideration.
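A minimal sketch of the opt-in toggle the issue asks for, using `environs` the way the rest of the settings module does (the environment variable name `TRUST_X_FORWARDED_PROTO` is illustrative only, not taken from the actual patch):

```python
# Hedged sketch of an opt-in SECURE_PROXY_SSL_HEADER, read from .env.
from environs import Env

env = Env()
env.read_env()

# Only trust X-Forwarded-Proto when the operator explicitly enables it:
# if the proxy does not strip the header, clients could spoof "https".
if env.bool("TRUST_X_FORWARDED_PROTO", False):
    SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
```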
[ { "content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\n# pylint: disable=line-too-long\n\nenv = Env()\nenv.read_env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.5.1\"\n\nRELEASE_API = env(\n \"RELEASE_API\",\n \"https://api.github.com/repos/bookwyrm-social/bookwyrm/releases/latest\",\n)\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"e678183c\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nEMAIL_SENDER_NAME = env(\"EMAIL_SENDER_NAME\", \"admin\")\nEMAIL_SENDER_DOMAIN = env(\"EMAIL_SENDER_DOMAIN\", DOMAIN)\nEMAIL_SENDER = f\"{EMAIL_SENDER_NAME}@{EMAIL_SENDER_DOMAIN}\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\nLANGUAGE_COOKIE_NAME = env.str(\"LANGUAGE_COOKIE_NAME\", \"django_language\")\n\nSTATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\nMEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\nPREVIEW_DEFAULT_FONT = env.str(\"PREVIEW_DEFAULT_FONT\", \"Source Han Sans\")\n\nFONTS = {\n \"Source Han Sans\": {\n \"directory\": \"source_han_sans\",\n \"filename\": \"SourceHanSans-VF.ttf.ttc\",\n \"url\": \"https://github.com/adobe-fonts/source-han-sans/raw/release/Variable/OTC/SourceHanSans-VF.ttf.ttc\",\n }\n}\nFONT_DIR = os.path.join(STATIC_ROOT, \"fonts\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", not DEBUG)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"sass_processor\",\n \"bookwyrm\",\n \"celery\",\n \"django_celery_beat\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n 
\"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.middleware.TimezoneMiddleware\",\n \"bookwyrm.middleware.IPBlocklistMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\nLOG_LEVEL = env(\"LOG_LEVEL\", \"INFO\").upper()\n# Override aspects of the default handler to our taste\n# See https://docs.djangoproject.com/en/3.2/topics/logging/#default-logging-configuration\n# for a reference to the defaults we're overriding\n#\n# It seems that in order to override anything you have to include its\n# entire dependency tree (handlers and filters) which makes this a\n# bit verbose\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"filters\": {\n # These are copied from the default configuration, required for\n # implementing mail_admins below\n \"require_debug_false\": {\n \"()\": \"django.utils.log.RequireDebugFalse\",\n },\n \"require_debug_true\": {\n \"()\": \"django.utils.log.RequireDebugTrue\",\n },\n \"ignore_missing_variable\": {\n \"()\": \"bookwyrm.utils.log.IgnoreVariableDoesNotExist\",\n },\n },\n \"handlers\": {\n # Overrides the default handler to make it log to console\n # regardless of the DEBUG setting (default is to not log to\n # console if DEBUG=False)\n \"console\": {\n \"level\": LOG_LEVEL,\n \"filters\": [\"ignore_missing_variable\"],\n \"class\": \"logging.StreamHandler\",\n },\n # This is copied as-is from the default logger, and is\n # required for the django section below\n \"mail_admins\": {\n \"level\": \"ERROR\",\n \"filters\": [\"require_debug_false\"],\n \"class\": \"django.utils.log.AdminEmailHandler\",\n },\n },\n \"loggers\": {\n # Install our new console handler for Django's logger, and\n # override the log level while we're at it\n \"django\": {\n \"handlers\": [\"console\", \"mail_admins\"],\n \"level\": LOG_LEVEL,\n },\n \"django.utils.autoreload\": {\n \"level\": \"INFO\",\n },\n # Add a bookwyrm-specific logger\n \"bookwyrm\": {\n \"handlers\": [\"console\"],\n \"level\": LOG_LEVEL,\n },\n },\n}\n\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"sass_processor.finders.CssFinder\",\n]\n\nSASS_PROCESSOR_INCLUDE_FILE_PATTERN = r\"^.+\\.[s]{0,1}(?:a|c)ss$\"\nSASS_PROCESSOR_ENABLED = True\n\n# minify css is production but not dev\nif not DEBUG:\n SASS_OUTPUT_STYLE = \"compressed\"\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\nREDIS_ACTIVITY_DB_INDEX = env(\"REDIS_ACTIVITY_DB_INDEX\", 0)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": 
_(\"Books\")},\n]\n\n# Search configuration\n# total time in seconds that the instance will spend searching connectors\nSEARCH_TIMEOUT = int(env(\"SEARCH_TIMEOUT\", 8))\n# timeout for a query to an individual connector\nQUERY_TIMEOUT = int(env(\"QUERY_TIMEOUT\", 5))\n\n# Redis cache backend\nif env(\"USE_DUMMY_CACHE\", False):\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.dummy.DummyCache\",\n }\n }\nelse:\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://:{REDIS_ACTIVITY_PASSWORD}@{REDIS_ACTIVITY_HOST}:{REDIS_ACTIVITY_PORT}/{REDIS_ACTIVITY_DB_INDEX}\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n }\n }\n\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"default\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"bookwyrm\"),\n \"USER\": env(\"POSTGRES_USER\", \"bookwyrm\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"bookwyrm\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"PGPORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = env(\"LANGUAGE_CODE\", \"en-us\")\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"ca-es\", _(\"Català (Catalan)\")),\n (\"de-de\", _(\"Deutsch (German)\")),\n (\"es-es\", _(\"Español (Spanish)\")),\n (\"gl-es\", _(\"Galego (Galician)\")),\n (\"it-it\", _(\"Italiano (Italian)\")),\n (\"fi-fi\", _(\"Suomi (Finnish)\")),\n (\"fr-fr\", _(\"Français (French)\")),\n (\"lt-lt\", _(\"Lietuvių (Lithuanian)\")),\n (\"no-no\", _(\"Norsk (Norwegian)\")),\n (\"pl-pl\", _(\"Polski (Polish)\")),\n (\"pt-br\", _(\"Português do Brasil (Brazilian Portuguese)\")),\n (\"pt-pt\", _(\"Português Europeu (European Portuguese)\")),\n (\"ro-ro\", _(\"Română (Romanian)\")),\n (\"sv-se\", _(\"Svenska (Swedish)\")),\n (\"zh-hans\", _(\"简体中文 (Simplified Chinese)\")),\n (\"zh-hant\", _(\"繁體中文 (Traditional Chinese)\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nagent = requests.utils.default_user_agent()\nUSER_AGENT = f\"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)\"\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\nIMAGEKIT_DEFAULT_CACHEFILE_STRATEGY = \"bookwyrm.thumbnail_generation.Strategy\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = 
env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/\"\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/\"\n MEDIA_FULL_URL = MEDIA_URL\n STATIC_FULL_URL = STATIC_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\nelse:\n STATIC_URL = \"/static/\"\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{MEDIA_URL}\"\n STATIC_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{STATIC_URL}\"\n\nOTEL_EXPORTER_OTLP_ENDPOINT = env(\"OTEL_EXPORTER_OTLP_ENDPOINT\", None)\nOTEL_EXPORTER_OTLP_HEADERS = env(\"OTEL_EXPORTER_OTLP_HEADERS\", None)\nOTEL_SERVICE_NAME = env(\"OTEL_SERVICE_NAME\", None)\n\nTWO_FACTOR_LOGIN_MAX_SECONDS = 60\n", "path": "bookwyrm/settings.py" } ]
[ { "content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\n# pylint: disable=line-too-long\n\nenv = Env()\nenv.read_env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.5.1\"\n\nRELEASE_API = env(\n \"RELEASE_API\",\n \"https://api.github.com/repos/bookwyrm-social/bookwyrm/releases/latest\",\n)\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"e678183c\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nEMAIL_SENDER_NAME = env(\"EMAIL_SENDER_NAME\", \"admin\")\nEMAIL_SENDER_DOMAIN = env(\"EMAIL_SENDER_DOMAIN\", DOMAIN)\nEMAIL_SENDER = f\"{EMAIL_SENDER_NAME}@{EMAIL_SENDER_DOMAIN}\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\nLANGUAGE_COOKIE_NAME = env.str(\"LANGUAGE_COOKIE_NAME\", \"django_language\")\n\nSTATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\nMEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\nPREVIEW_DEFAULT_FONT = env.str(\"PREVIEW_DEFAULT_FONT\", \"Source Han Sans\")\n\nFONTS = {\n \"Source Han Sans\": {\n \"directory\": \"source_han_sans\",\n \"filename\": \"SourceHanSans-VF.ttf.ttc\",\n \"url\": \"https://github.com/adobe-fonts/source-han-sans/raw/release/Variable/OTC/SourceHanSans-VF.ttf.ttc\",\n }\n}\nFONT_DIR = os.path.join(STATIC_ROOT, \"fonts\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", not DEBUG)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"sass_processor\",\n \"bookwyrm\",\n \"celery\",\n \"django_celery_beat\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n 
\"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.middleware.TimezoneMiddleware\",\n \"bookwyrm.middleware.IPBlocklistMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\nLOG_LEVEL = env(\"LOG_LEVEL\", \"INFO\").upper()\n# Override aspects of the default handler to our taste\n# See https://docs.djangoproject.com/en/3.2/topics/logging/#default-logging-configuration\n# for a reference to the defaults we're overriding\n#\n# It seems that in order to override anything you have to include its\n# entire dependency tree (handlers and filters) which makes this a\n# bit verbose\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"filters\": {\n # These are copied from the default configuration, required for\n # implementing mail_admins below\n \"require_debug_false\": {\n \"()\": \"django.utils.log.RequireDebugFalse\",\n },\n \"require_debug_true\": {\n \"()\": \"django.utils.log.RequireDebugTrue\",\n },\n \"ignore_missing_variable\": {\n \"()\": \"bookwyrm.utils.log.IgnoreVariableDoesNotExist\",\n },\n },\n \"handlers\": {\n # Overrides the default handler to make it log to console\n # regardless of the DEBUG setting (default is to not log to\n # console if DEBUG=False)\n \"console\": {\n \"level\": LOG_LEVEL,\n \"filters\": [\"ignore_missing_variable\"],\n \"class\": \"logging.StreamHandler\",\n },\n # This is copied as-is from the default logger, and is\n # required for the django section below\n \"mail_admins\": {\n \"level\": \"ERROR\",\n \"filters\": [\"require_debug_false\"],\n \"class\": \"django.utils.log.AdminEmailHandler\",\n },\n },\n \"loggers\": {\n # Install our new console handler for Django's logger, and\n # override the log level while we're at it\n \"django\": {\n \"handlers\": [\"console\", \"mail_admins\"],\n \"level\": LOG_LEVEL,\n },\n \"django.utils.autoreload\": {\n \"level\": \"INFO\",\n },\n # Add a bookwyrm-specific logger\n \"bookwyrm\": {\n \"handlers\": [\"console\"],\n \"level\": LOG_LEVEL,\n },\n },\n}\n\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"sass_processor.finders.CssFinder\",\n]\n\nSASS_PROCESSOR_INCLUDE_FILE_PATTERN = r\"^.+\\.[s]{0,1}(?:a|c)ss$\"\nSASS_PROCESSOR_ENABLED = True\n\n# minify css is production but not dev\nif not DEBUG:\n SASS_OUTPUT_STYLE = \"compressed\"\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\nREDIS_ACTIVITY_DB_INDEX = env(\"REDIS_ACTIVITY_DB_INDEX\", 0)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": 
_(\"Books\")},\n]\n\n# Search configuration\n# total time in seconds that the instance will spend searching connectors\nSEARCH_TIMEOUT = int(env(\"SEARCH_TIMEOUT\", 8))\n# timeout for a query to an individual connector\nQUERY_TIMEOUT = int(env(\"QUERY_TIMEOUT\", 5))\n\n# Redis cache backend\nif env(\"USE_DUMMY_CACHE\", False):\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.dummy.DummyCache\",\n }\n }\nelse:\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://:{REDIS_ACTIVITY_PASSWORD}@{REDIS_ACTIVITY_HOST}:{REDIS_ACTIVITY_PORT}/{REDIS_ACTIVITY_DB_INDEX}\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n }\n }\n\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"default\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"bookwyrm\"),\n \"USER\": env(\"POSTGRES_USER\", \"bookwyrm\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"bookwyrm\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"PGPORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = env(\"LANGUAGE_CODE\", \"en-us\")\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"ca-es\", _(\"Català (Catalan)\")),\n (\"de-de\", _(\"Deutsch (German)\")),\n (\"es-es\", _(\"Español (Spanish)\")),\n (\"gl-es\", _(\"Galego (Galician)\")),\n (\"it-it\", _(\"Italiano (Italian)\")),\n (\"fi-fi\", _(\"Suomi (Finnish)\")),\n (\"fr-fr\", _(\"Français (French)\")),\n (\"lt-lt\", _(\"Lietuvių (Lithuanian)\")),\n (\"no-no\", _(\"Norsk (Norwegian)\")),\n (\"pl-pl\", _(\"Polski (Polish)\")),\n (\"pt-br\", _(\"Português do Brasil (Brazilian Portuguese)\")),\n (\"pt-pt\", _(\"Português Europeu (European Portuguese)\")),\n (\"ro-ro\", _(\"Română (Romanian)\")),\n (\"sv-se\", _(\"Svenska (Swedish)\")),\n (\"zh-hans\", _(\"简体中文 (Simplified Chinese)\")),\n (\"zh-hant\", _(\"繁體中文 (Traditional Chinese)\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nagent = requests.utils.default_user_agent()\nUSER_AGENT = f\"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)\"\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\nIMAGEKIT_DEFAULT_CACHEFILE_STRATEGY = \"bookwyrm.thumbnail_generation.Strategy\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = 
env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/\"\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/\"\n MEDIA_FULL_URL = MEDIA_URL\n STATIC_FULL_URL = STATIC_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\nelse:\n STATIC_URL = \"/static/\"\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{MEDIA_URL}\"\n STATIC_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{STATIC_URL}\"\n\nOTEL_EXPORTER_OTLP_ENDPOINT = env(\"OTEL_EXPORTER_OTLP_ENDPOINT\", None)\nOTEL_EXPORTER_OTLP_HEADERS = env(\"OTEL_EXPORTER_OTLP_HEADERS\", None)\nOTEL_SERVICE_NAME = env(\"OTEL_SERVICE_NAME\", None)\n\nTWO_FACTOR_LOGIN_MAX_SECONDS = 60\n\nHTTP_X_FORWARDED_PROTO = env.bool(\"SECURE_PROXY_SSL_HEADER\", False)\nif HTTP_X_FORWARDED_PROTO:\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n", "path": "bookwyrm/settings.py" } ]
diff --git a/.env.example b/.env.example index 58c53b5bf2..bbd825a9af 100644 --- a/.env.example +++ b/.env.example @@ -108,3 +108,10 @@ OTEL_EXPORTER_OTLP_ENDPOINT= OTEL_EXPORTER_OTLP_HEADERS= # Service name to identify your app OTEL_SERVICE_NAME= + +# Set HTTP_X_FORWARDED_PROTO ONLY to true if you know what you are doing. +# Only use it if your proxy is "swallowing" if the original request was made +# via https. Please refer to the Django-Documentation and assess the risks +# for your instance: +# https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header +HTTP_X_FORWARDED_PROTO=false diff --git a/.github/workflows/django-tests.yml b/.github/workflows/django-tests.yml index 97a7448131..4335a46055 100644 --- a/.github/workflows/django-tests.yml +++ b/.github/workflows/django-tests.yml @@ -56,5 +56,6 @@ jobs: EMAIL_USE_TLS: true ENABLE_PREVIEW_IMAGES: false ENABLE_THUMBNAIL_GENERATION: true + HTTP_X_FORWARDED_PROTO: false run: | pytest -n 3 diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py index 0fcc005908..ed0f578392 100644 --- a/bookwyrm/settings.py +++ b/bookwyrm/settings.py @@ -364,3 +364,7 @@ OTEL_SERVICE_NAME = env("OTEL_SERVICE_NAME", None) TWO_FACTOR_LOGIN_MAX_SECONDS = 60 + +HTTP_X_FORWARDED_PROTO = env.bool("SECURE_PROXY_SSL_HEADER", False) +if HTTP_X_FORWARDED_PROTO: + SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
ivy-llc__ivy-16680
rad2deg
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, 
name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py" } ]
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, 
name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py" } ]
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py index ae73cb96141ea..602b8d1e24ec7 100644 --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -150,3 +150,9 @@ def log2(x, name=None): @to_ivy_arrays_and_back def log1p(x, name=None): return ivy.log1p(x) + + +@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") +@to_ivy_arrays_and_back +def rad2deg(x, name=None): + return ivy.rad2deg(x) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py index b960d406a3fdb..338602e340695 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py @@ -669,3 +669,29 @@ def test_paddle_log1p( on_device=on_device, x=x[0], ) + + +# rad2deg +@handle_frontend_test( + fn_tree="paddle.rad2deg", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + ), +) +def test_paddle_rad2deg( + *, + dtype_and_x, + on_device, + fn_tree, + frontend, + test_flags, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + )
xonsh__xonsh-2360
@$ result is not satisfying When I run `@$(which ls)` I expect to see this result: `some_file some_other_file` But I see this: `ls: invalid option -- ' ' Try 'ls --help' for more information.` xonsh version: 0.5.9 OS: Linux (Ubuntu 16.04.2)
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"The xonsh built-ins.\n\nNote that this module is named 'built_ins' so as not to be confused with the\nspecial Python builtins module.\n\"\"\"\nimport io\nimport os\nimport re\nimport sys\nimport types\nimport shlex\nimport signal\nimport atexit\nimport pathlib\nimport inspect\nimport builtins\nimport itertools\nimport subprocess\nimport contextlib\nimport collections.abc as cabc\n\nfrom xonsh.ast import AST\nfrom xonsh.lazyasd import LazyObject, lazyobject\nfrom xonsh.inspectors import Inspector\nfrom xonsh.aliases import Aliases, make_default_aliases\nfrom xonsh.environ import Env, default_env, locate_binary\nfrom xonsh.jobs import add_job\nfrom xonsh.platform import ON_POSIX, ON_WINDOWS\nfrom xonsh.proc import (\n PopenThread, ProcProxyThread, ProcProxy, ConsoleParallelReader,\n pause_call_resume, CommandPipeline, HiddenCommandPipeline,\n STDOUT_CAPTURE_KINDS)\nfrom xonsh.tools import (\n suggest_commands, expand_path, globpath, XonshError,\n XonshCalledProcessError\n)\nfrom xonsh.lazyimps import pty, termios\nfrom xonsh.commands_cache import CommandsCache\nfrom xonsh.events import events\n\nimport xonsh.completers.init\n\nBUILTINS_LOADED = False\nINSPECTOR = LazyObject(Inspector, globals(), 'INSPECTOR')\n\n\n@lazyobject\ndef AT_EXIT_SIGNALS():\n sigs = (signal.SIGABRT, signal.SIGFPE, signal.SIGILL, signal.SIGSEGV,\n signal.SIGTERM)\n if ON_POSIX:\n sigs += (signal.SIGTSTP, signal.SIGQUIT, signal.SIGHUP)\n return sigs\n\n\ndef resetting_signal_handle(sig, f):\n \"\"\"Sets a new signal handle that will automatically restore the old value\n once the new handle is finished.\n \"\"\"\n oldh = signal.getsignal(sig)\n\n def newh(s=None, frame=None):\n f(s, frame)\n signal.signal(sig, oldh)\n if sig != 0:\n sys.exit(sig)\n signal.signal(sig, newh)\n\n\ndef helper(x, name=''):\n \"\"\"Prints help about, and then returns that variable.\"\"\"\n INSPECTOR.pinfo(x, oname=name, detail_level=0)\n return x\n\n\ndef superhelper(x, name=''):\n \"\"\"Prints help about, and then returns that variable.\"\"\"\n INSPECTOR.pinfo(x, oname=name, detail_level=1)\n return x\n\n\ndef reglob(path, parts=None, i=None):\n \"\"\"Regular expression-based globbing.\"\"\"\n if parts is None:\n path = os.path.normpath(path)\n drive, tail = os.path.splitdrive(path)\n parts = tail.split(os.sep)\n d = os.sep if os.path.isabs(path) else '.'\n d = os.path.join(drive, d)\n return reglob(d, parts, i=0)\n base = subdir = path\n if i == 0:\n if not os.path.isabs(base):\n base = ''\n elif len(parts) > 1:\n i += 1\n regex = os.path.join(base, parts[i])\n if ON_WINDOWS:\n # currently unable to access regex backslash sequences\n # on Windows due to paths using \\.\n regex = regex.replace('\\\\', '\\\\\\\\')\n regex = re.compile(regex)\n files = os.listdir(subdir)\n files.sort()\n paths = []\n i1 = i + 1\n if i1 == len(parts):\n for f in files:\n p = os.path.join(base, f)\n if regex.fullmatch(p) is not None:\n paths.append(p)\n else:\n for f in files:\n p = os.path.join(base, f)\n if regex.fullmatch(p) is None or not os.path.isdir(p):\n continue\n paths += reglob(p, parts=parts, i=i1)\n return paths\n\n\ndef path_literal(s):\n s = expand_path(s)\n return pathlib.Path(s)\n\n\ndef regexsearch(s):\n s = expand_path(s)\n return reglob(s)\n\n\ndef globsearch(s):\n csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS')\n glob_sorted = builtins.__xonsh_env__.get('GLOB_SORTED')\n return globpath(s, ignore_case=(not csc), return_empty=True,\n sort_result=glob_sorted)\n\n\ndef 
pathsearch(func, s, pymode=False, pathobj=False):\n \"\"\"\n Takes a string and returns a list of file paths that match (regex, glob,\n or arbitrary search function). If pathobj=True, the return is a list of\n pathlib.Path objects instead of strings.\n \"\"\"\n if (not callable(func) or\n len(inspect.signature(func).parameters) != 1):\n error = \"%r is not a known path search function\"\n raise XonshError(error % func)\n o = func(s)\n if pathobj and pymode:\n o = list(map(pathlib.Path, o))\n no_match = [] if pymode else [s]\n return o if len(o) != 0 else no_match\n\n\nRE_SHEBANG = LazyObject(lambda: re.compile(r'#![ \\t]*(.+?)$'),\n globals(), 'RE_SHEBANG')\n\n\ndef _is_binary(fname, limit=80):\n with open(fname, 'rb') as f:\n for i in range(limit):\n char = f.read(1)\n if char == b'\\0':\n return True\n if char == b'\\n':\n return False\n if char == b'':\n return False\n return False\n\n\ndef _un_shebang(x):\n if x == '/usr/bin/env':\n return []\n elif any(x.startswith(i) for i in ['/usr/bin', '/usr/local/bin', '/bin']):\n x = os.path.basename(x)\n elif x.endswith('python') or x.endswith('python.exe'):\n x = 'python'\n if x == 'xonsh':\n return ['python', '-m', 'xonsh.main']\n return [x]\n\n\ndef get_script_subproc_command(fname, args):\n \"\"\"Given the name of a script outside the path, returns a list representing\n an appropriate subprocess command to execute the script. Raises\n PermissionError if the script is not executable.\n \"\"\"\n # make sure file is executable\n if not os.access(fname, os.X_OK):\n raise PermissionError\n if ON_POSIX and not os.access(fname, os.R_OK):\n # on some systems, some importnat programs (e.g. sudo) will have\n # execute permissions but not read/write permisions. This enables\n # things with the SUID set to be run. 
Needs to come before _is_binary()\n # is called, because that function tries to read the file.\n return [fname] + args\n elif _is_binary(fname):\n # if the file is a binary, we should call it directly\n return [fname] + args\n if ON_WINDOWS:\n # Windows can execute various filetypes directly\n # as given in PATHEXT\n _, ext = os.path.splitext(fname)\n if ext.upper() in builtins.__xonsh_env__.get('PATHEXT'):\n return [fname] + args\n # find interpreter\n with open(fname, 'rb') as f:\n first_line = f.readline().decode().strip()\n m = RE_SHEBANG.match(first_line)\n # xonsh is the default interpreter\n if m is None:\n interp = ['xonsh']\n else:\n interp = m.group(1).strip()\n if len(interp) > 0:\n interp = shlex.split(interp)\n else:\n interp = ['xonsh']\n if ON_WINDOWS:\n o = []\n for i in interp:\n o.extend(_un_shebang(i))\n interp = o\n return interp + [fname] + args\n\n\n@lazyobject\ndef _REDIR_REGEX():\n name = \"(o(?:ut)?|e(?:rr)?|a(?:ll)?|&?\\d?)\"\n return re.compile(\"{r}(>?>|<){r}$\".format(r=name))\n\n\n_MODES = LazyObject(lambda: {'>>': 'a', '>': 'w', '<': 'r'}, globals(),\n '_MODES')\n_WRITE_MODES = LazyObject(lambda: frozenset({'w', 'a'}), globals(),\n '_WRITE_MODES')\n_REDIR_ALL = LazyObject(lambda: frozenset({'&', 'a', 'all'}),\n globals(), '_REDIR_ALL')\n_REDIR_ERR = LazyObject(lambda: frozenset({'2', 'e', 'err'}), globals(),\n '_REDIR_ERR')\n_REDIR_OUT = LazyObject(lambda: frozenset({'', '1', 'o', 'out'}), globals(),\n '_REDIR_OUT')\n_E2O_MAP = LazyObject(lambda: frozenset({'{}>{}'.format(e, o)\n for e in _REDIR_ERR\n for o in _REDIR_OUT\n if o != ''}), globals(), '_E2O_MAP')\n_O2E_MAP = LazyObject(lambda: frozenset({'{}>{}'.format(o, e)\n for e in _REDIR_ERR\n for o in _REDIR_OUT\n if o != ''}), globals(), '_O2E_MAP')\n\n\ndef _is_redirect(x):\n return isinstance(x, str) and _REDIR_REGEX.match(x)\n\n\ndef safe_open(fname, mode, buffering=-1):\n \"\"\"Safely attempts to open a file in for xonsh subprocs.\"\"\"\n # file descriptors\n try:\n return io.open(fname, mode, buffering=buffering)\n except PermissionError:\n raise XonshError('xonsh: {0}: permission denied'.format(fname))\n except FileNotFoundError:\n raise XonshError('xonsh: {0}: no such file or directory'.format(fname))\n except Exception:\n raise XonshError('xonsh: {0}: unable to open file'.format(fname))\n\n\ndef safe_close(x):\n \"\"\"Safely attempts to close an object.\"\"\"\n if not isinstance(x, io.IOBase):\n return\n if x.closed:\n return\n try:\n x.close()\n except Exception:\n pass\n\n\ndef _parse_redirects(r, loc=None):\n \"\"\"returns origin, mode, destination tuple\"\"\"\n orig, mode, dest = _REDIR_REGEX.match(r).groups()\n # redirect to fd\n if dest.startswith('&'):\n try:\n dest = int(dest[1:])\n if loc is None:\n loc, dest = dest, '' # NOQA\n else:\n e = 'Unrecognized redirection command: {}'.format(r)\n raise XonshError(e)\n except (ValueError, XonshError):\n raise\n except Exception:\n pass\n mode = _MODES.get(mode, None)\n if mode == 'r' and (len(orig) > 0 or len(dest) > 0):\n raise XonshError('Unrecognized redirection command: {}'.format(r))\n elif mode in _WRITE_MODES and len(dest) > 0:\n raise XonshError('Unrecognized redirection command: {}'.format(r))\n return orig, mode, dest\n\n\ndef _redirect_streams(r, loc=None):\n \"\"\"Returns stdin, stdout, stderr tuple of redirections.\"\"\"\n stdin = stdout = stderr = None\n no_ampersand = r.replace('&', '')\n # special case of redirecting stderr to stdout\n if no_ampersand in _E2O_MAP:\n stderr = subprocess.STDOUT\n return stdin, stdout, stderr\n elif 
no_ampersand in _O2E_MAP:\n stdout = 2 # using 2 as a flag, rather than using a file object\n return stdin, stdout, stderr\n # get streams\n orig, mode, dest = _parse_redirects(r)\n if mode == 'r':\n stdin = safe_open(loc, mode)\n elif mode in _WRITE_MODES:\n if orig in _REDIR_ALL:\n stdout = stderr = safe_open(loc, mode)\n elif orig in _REDIR_OUT:\n stdout = safe_open(loc, mode)\n elif orig in _REDIR_ERR:\n stderr = safe_open(loc, mode)\n else:\n raise XonshError('Unrecognized redirection command: {}'.format(r))\n else:\n raise XonshError('Unrecognized redirection command: {}'.format(r))\n return stdin, stdout, stderr\n\n\ndef default_signal_pauser(n, f):\n \"\"\"Pauses a signal, as needed.\"\"\"\n signal.pause()\n\n\ndef no_pg_xonsh_preexec_fn():\n \"\"\"Default subprocess preexec function for when there is no existing\n pipeline group.\n \"\"\"\n os.setpgrp()\n signal.signal(signal.SIGTSTP, default_signal_pauser)\n\n\nclass SubprocSpec:\n \"\"\"A container for specifiying how a subprocess command should be\n executed.\n \"\"\"\n\n kwnames = ('stdin', 'stdout', 'stderr', 'universal_newlines')\n\n def __init__(self, cmd, *, cls=subprocess.Popen, stdin=None, stdout=None,\n stderr=None, universal_newlines=False, captured=False):\n \"\"\"\n Parameters\n ----------\n cmd : list of str\n Command to be run.\n cls : Popen-like\n Class to run the subprocess with.\n stdin : file-like\n Popen file descriptor or flag for stdin.\n stdout : file-like\n Popen file descriptor or flag for stdout.\n stderr : file-like\n Popen file descriptor or flag for stderr.\n universal_newlines : bool\n Whether or not to use universal newlines.\n captured : bool or str, optional\n The flag for if the subprocess is captured, may be one of:\n False for $[], 'stdout' for $(), 'hiddenobject' for ![], or\n 'object' for !().\n\n Attributes\n ----------\n args : list of str\n Arguments as originally supplied.\n alias : list of str, callable, or None\n The alias that was reolved for this command, if any.\n binary_loc : str or None\n Path to binary to execute.\n is_proxy : bool\n Whether or not the subprocess is or should be run as a proxy.\n background : bool\n Whether or not the subprocess should be started in the background.\n threadable : bool\n Whether or not the subprocess is able to be run in a background\n thread, rather than the main thread.\n last_in_pipeline : bool\n Whether the subprocess is the last in the execution pipeline.\n captured_stdout : file-like\n Handle to captured stdin\n captured_stderr : file-like\n Handle to captured stderr\n \"\"\"\n self._stdin = self._stdout = self._stderr = None\n # args\n self.cmd = list(cmd)\n self.cls = cls\n self.stdin = stdin\n self.stdout = stdout\n self.stderr = stderr\n self.universal_newlines = universal_newlines\n self.captured = captured\n # pure attrs\n self.args = list(cmd)\n self.alias = None\n self.binary_loc = None\n self.is_proxy = False\n self.background = False\n self.threadable = True\n self.last_in_pipeline = False\n self.captured_stdout = None\n self.captured_stderr = None\n\n def __str__(self):\n s = self.__class__.__name__ + '(' + str(self.cmd) + ', '\n s += self.cls.__name__ + ', '\n kws = [n + '=' + str(getattr(self, n)) for n in self.kwnames]\n s += ', '.join(kws) + ')'\n return s\n\n def __repr__(self):\n s = self.__class__.__name__ + '(' + repr(self.cmd) + ', '\n s += self.cls.__name__ + ', '\n kws = [n + '=' + repr(getattr(self, n)) for n in self.kwnames]\n s += ', '.join(kws) + ')'\n return s\n\n #\n # Properties\n #\n\n @property\n def 
stdin(self):\n return self._stdin\n\n @stdin.setter\n def stdin(self, value):\n if self._stdin is None:\n self._stdin = value\n elif value is None:\n pass\n else:\n safe_close(value)\n msg = 'Multiple inputs for stdin for {0!r}'\n msg = msg.format(' '.join(self.args))\n raise XonshError(msg)\n\n @property\n def stdout(self):\n return self._stdout\n\n @stdout.setter\n def stdout(self, value):\n if self._stdout is None:\n self._stdout = value\n elif value is None:\n pass\n else:\n safe_close(value)\n msg = 'Multiple redirections for stdout for {0!r}'\n msg = msg.format(' '.join(self.args))\n raise XonshError(msg)\n\n @property\n def stderr(self):\n return self._stderr\n\n @stderr.setter\n def stderr(self, value):\n if self._stderr is None:\n self._stderr = value\n elif value is None:\n pass\n else:\n safe_close(value)\n msg = 'Multiple redirections for stderr for {0!r}'\n msg = msg.format(' '.join(self.args))\n raise XonshError(msg)\n\n #\n # Execution methods\n #\n\n def run(self, *, pipeline_group=None):\n \"\"\"Launches the subprocess and returns the object.\"\"\"\n kwargs = {n: getattr(self, n) for n in self.kwnames}\n self.prep_env(kwargs)\n self.prep_preexec_fn(kwargs, pipeline_group=pipeline_group)\n if callable(self.alias):\n if 'preexec_fn' in kwargs:\n kwargs.pop('preexec_fn')\n p = self.cls(self.alias, self.cmd, **kwargs)\n else:\n self._fix_null_cmd_bytes()\n p = self._run_binary(kwargs)\n p.spec = self\n p.last_in_pipeline = self.last_in_pipeline\n p.captured_stdout = self.captured_stdout\n p.captured_stderr = self.captured_stderr\n return p\n\n def _run_binary(self, kwargs):\n try:\n bufsize = 1\n p = self.cls(self.cmd, bufsize=bufsize, **kwargs)\n except PermissionError:\n e = 'xonsh: subprocess mode: permission denied: {0}'\n raise XonshError(e.format(self.cmd[0]))\n except FileNotFoundError:\n cmd0 = self.cmd[0]\n e = 'xonsh: subprocess mode: command not found: {0}'.format(cmd0)\n env = builtins.__xonsh_env__\n sug = suggest_commands(cmd0, env, builtins.aliases)\n if len(sug.strip()) > 0:\n e += '\\n' + suggest_commands(cmd0, env, builtins.aliases)\n raise XonshError(e)\n return p\n\n def prep_env(self, kwargs):\n \"\"\"Prepares the environment to use in the subprocess.\"\"\"\n denv = builtins.__xonsh_env__.detype()\n if ON_WINDOWS:\n # Over write prompt variable as xonsh's $PROMPT does\n # not make much sense for other subprocs\n denv['PROMPT'] = '$P$G'\n kwargs['env'] = denv\n\n def prep_preexec_fn(self, kwargs, pipeline_group=None):\n \"\"\"Prepares the 'preexec_fn' keyword argument\"\"\"\n if not ON_POSIX:\n return\n if not builtins.__xonsh_env__.get('XONSH_INTERACTIVE'):\n return\n if pipeline_group is None:\n xonsh_preexec_fn = no_pg_xonsh_preexec_fn\n else:\n def xonsh_preexec_fn():\n \"\"\"Preexec function bound to a pipeline group.\"\"\"\n os.setpgid(0, pipeline_group)\n signal.signal(signal.SIGTSTP, default_signal_pauser)\n kwargs['preexec_fn'] = xonsh_preexec_fn\n\n def _fix_null_cmd_bytes(self):\n # Popen does not accept null bytes in its input commands.\n # that doesn;t stop some subproces from using them. 
Here we\n # escape them just in case.\n cmd = self.cmd\n for i in range(len(cmd)):\n cmd[i] = cmd[i].replace('\\0', '\\\\0')\n\n #\n # Building methods\n #\n\n @classmethod\n def build(kls, cmd, *, cls=subprocess.Popen, **kwargs):\n \"\"\"Creates an instance of the subprocess command, with any\n modifcations and adjustments based on the actual cmd that\n was recieved.\n \"\"\"\n # modifications that do not alter cmds may come before creating instance\n spec = kls(cmd, cls=cls, **kwargs)\n # modifications that alter cmds must come after creating instance\n # perform initial redirects\n spec.redirect_leading()\n spec.redirect_trailing()\n # apply aliases\n spec.resolve_alias()\n spec.resolve_binary_loc()\n spec.resolve_auto_cd()\n spec.resolve_executable_commands()\n spec.resolve_alias_cls()\n return spec\n\n def redirect_leading(self):\n \"\"\"Manage leading redirects such as with '< input.txt COMMAND'. \"\"\"\n while len(self.cmd) >= 3 and self.cmd[0] == '<':\n self.stdin = safe_open(self.cmd[1], 'r')\n self.cmd = self.cmd[2:]\n\n def redirect_trailing(self):\n \"\"\"Manages trailing redirects.\"\"\"\n while True:\n cmd = self.cmd\n if len(cmd) >= 3 and _is_redirect(cmd[-2]):\n streams = _redirect_streams(cmd[-2], cmd[-1])\n self.stdin, self.stdout, self.stderr = streams\n self.cmd = cmd[:-2]\n elif len(cmd) >= 2 and _is_redirect(cmd[-1]):\n streams = _redirect_streams(cmd[-1])\n self.stdin, self.stdout, self.stderr = streams\n self.cmd = cmd[:-1]\n else:\n break\n\n def resolve_alias(self):\n \"\"\"Sets alias in command, if applicable.\"\"\"\n cmd0 = self.cmd[0]\n if callable(cmd0):\n alias = cmd0\n else:\n alias = builtins.aliases.get(cmd0, None)\n self.alias = alias\n\n def resolve_binary_loc(self):\n \"\"\"Sets the binary location\"\"\"\n alias = self.alias\n if alias is None:\n binary_loc = locate_binary(self.cmd[0])\n elif callable(alias):\n binary_loc = None\n else:\n binary_loc = locate_binary(alias[0])\n self.binary_loc = binary_loc\n\n def resolve_auto_cd(self):\n \"\"\"Implements AUTO_CD functionality.\"\"\"\n if not (self.alias is None and\n self.binary_loc is None and\n len(self.cmd) == 1 and\n builtins.__xonsh_env__.get('AUTO_CD') and\n os.path.isdir(self.cmd[0])):\n return\n self.cmd.insert(0, 'cd')\n self.alias = builtins.aliases.get('cd', None)\n\n def resolve_executable_commands(self):\n \"\"\"Resolve command executables, if applicable.\"\"\"\n alias = self.alias\n if alias is None:\n pass\n elif callable(alias):\n self.cmd.pop(0)\n return\n else:\n self.cmd = alias + self.cmd[1:]\n # resolve any redirects the aliases may have applied\n self.redirect_leading()\n self.redirect_trailing()\n if self.binary_loc is None:\n return\n try:\n self.cmd = get_script_subproc_command(self.binary_loc, self.cmd[1:])\n except PermissionError:\n e = 'xonsh: subprocess mode: permission denied: {0}'\n raise XonshError(e.format(self.cmd[0]))\n\n def resolve_alias_cls(self):\n \"\"\"Determine which proxy class to run an alias with.\"\"\"\n alias = self.alias\n if not callable(alias):\n return\n self.is_proxy = True\n thable = getattr(alias, '__xonsh_threadable__', True)\n cls = ProcProxyThread if thable else ProcProxy\n self.cls = cls\n self.threadable = thable\n # also check capturablity, while we are here\n cpable = getattr(alias, '__xonsh_capturable__', self.captured)\n self.captured = cpable\n\n\ndef _safe_pipe_properties(fd, use_tty=False):\n \"\"\"Makes sure that a pipe file descriptor properties are sane.\"\"\"\n if not use_tty:\n return\n # due to some weird, long standing issue in 
Python, PTYs come out\n # replacing newline \\n with \\r\\n. This causes issues for raw unix\n # protocols, like git and ssh, which expect unix line endings.\n # see https://mail.python.org/pipermail/python-list/2013-June/650460.html\n # for more details and the following solution.\n props = termios.tcgetattr(fd)\n props[1] = props[1] & (~termios.ONLCR) | termios.ONLRET\n termios.tcsetattr(fd, termios.TCSANOW, props)\n\n\ndef _update_last_spec(last):\n captured = last.captured\n last.last_in_pipeline = True\n if not captured:\n return\n callable_alias = callable(last.alias)\n if callable_alias:\n pass\n else:\n cmds_cache = builtins.__xonsh_commands_cache__\n thable = (cmds_cache.predict_threadable(last.args) and\n cmds_cache.predict_threadable(last.cmd))\n if captured and thable:\n last.cls = PopenThread\n elif not thable:\n # foreground processes should use Popen\n last.threadable = False\n if captured == 'object' or captured == 'hiddenobject':\n # CommandPipeline objects should not pipe stdout, stderr\n return\n # cannot used PTY pipes for aliases, for some dark reason,\n # and must use normal pipes instead.\n use_tty = ON_POSIX and not callable_alias\n # Do not set standard in! Popen is not a fan of redirections here\n # set standard out\n if last.stdout is not None:\n last.universal_newlines = True\n elif captured in STDOUT_CAPTURE_KINDS:\n last.universal_newlines = False\n r, w = os.pipe()\n last.stdout = safe_open(w, 'wb')\n last.captured_stdout = safe_open(r, 'rb')\n elif builtins.__xonsh_stdout_uncaptured__ is not None:\n last.universal_newlines = True\n last.stdout = builtins.__xonsh_stdout_uncaptured__\n last.captured_stdout = last.stdout\n elif ON_WINDOWS and not callable_alias:\n last.universal_newlines = True\n last.stdout = None # must truly stream on windows\n last.captured_stdout = ConsoleParallelReader(1)\n else:\n last.universal_newlines = True\n r, w = pty.openpty() if use_tty else os.pipe()\n _safe_pipe_properties(w, use_tty=use_tty)\n last.stdout = safe_open(w, 'w')\n _safe_pipe_properties(r, use_tty=use_tty)\n last.captured_stdout = safe_open(r, 'r')\n # set standard error\n if last.stderr is not None:\n pass\n elif captured == 'object':\n r, w = os.pipe()\n last.stderr = safe_open(w, 'w')\n last.captured_stderr = safe_open(r, 'r')\n elif builtins.__xonsh_stderr_uncaptured__ is not None:\n last.stderr = builtins.__xonsh_stderr_uncaptured__\n last.captured_stderr = last.stderr\n elif ON_WINDOWS and not callable_alias:\n last.universal_newlines = True\n last.stderr = None # must truly stream on windows\n else:\n r, w = pty.openpty() if use_tty else os.pipe()\n _safe_pipe_properties(w, use_tty=use_tty)\n last.stderr = safe_open(w, 'w')\n _safe_pipe_properties(r, use_tty=use_tty)\n last.captured_stderr = safe_open(r, 'r')\n # redirect stdout to stderr, if we should\n if isinstance(last.stdout, int) and last.stdout == 2:\n # need to use private interface to avoid duplication.\n last._stdout = last.stderr\n\n\ndef cmds_to_specs(cmds, captured=False):\n \"\"\"Converts a list of cmds to a list of SubprocSpec objects that are\n ready to be executed.\n \"\"\"\n # first build the subprocs independently and separate from the redirects\n specs = []\n redirects = []\n for cmd in cmds:\n if isinstance(cmd, str):\n redirects.append(cmd)\n else:\n if cmd[-1] == '&':\n cmd = cmd[:-1]\n redirects.append('&')\n spec = SubprocSpec.build(cmd, captured=captured)\n specs.append(spec)\n # now modify the subprocs based on the redirects.\n for i, redirect in enumerate(redirects):\n if redirect 
== '|':\n # these should remain integer file descriptors, and not Python\n # file objects since they connect processes.\n r, w = os.pipe()\n specs[i].stdout = w\n specs[i + 1].stdin = r\n elif redirect == '&' and i == len(redirects) - 1:\n specs[-1].background = True\n else:\n raise XonshError('unrecognized redirect {0!r}'.format(redirect))\n # Apply boundry conditions\n _update_last_spec(specs[-1])\n return specs\n\n\ndef _should_set_title(captured=False):\n env = builtins.__xonsh_env__\n return (env.get('XONSH_INTERACTIVE') and\n not env.get('XONSH_STORE_STDOUT') and\n captured not in STDOUT_CAPTURE_KINDS and\n hasattr(builtins, '__xonsh_shell__'))\n\n\ndef run_subproc(cmds, captured=False):\n \"\"\"Runs a subprocess, in its many forms. This takes a list of 'commands,'\n which may be a list of command line arguments or a string, representing\n a special connecting character. For example::\n\n $ ls | grep wakka\n\n is represented by the following cmds::\n\n [['ls'], '|', ['grep', 'wakka']]\n\n Lastly, the captured argument affects only the last real command.\n \"\"\"\n specs = cmds_to_specs(cmds, captured=captured)\n captured = specs[-1].captured\n if captured == 'hiddenobject':\n command = HiddenCommandPipeline(specs)\n else:\n command = CommandPipeline(specs)\n proc = command.proc\n background = command.spec.background\n if not all(x.is_proxy for x in specs):\n add_job({\n 'cmds': cmds,\n 'pids': [i.pid for i in command.procs],\n 'obj': proc,\n 'bg': background,\n 'pipeline': command,\n 'pgrp': command.term_pgid,\n })\n if _should_set_title(captured=captured):\n # set title here to get currently executing command\n pause_call_resume(proc, builtins.__xonsh_shell__.settitle)\n # create command or return if backgrounding.\n if background:\n return\n # now figure out what we should return.\n if captured == 'stdout':\n command.end()\n return command.output\n elif captured == 'object':\n return command\n elif captured == 'hiddenobject':\n command.end()\n return command\n else:\n command.end()\n return\n\n\ndef subproc_captured_stdout(*cmds):\n \"\"\"Runs a subprocess, capturing the output. Returns the stdout\n that was produced as a str.\n \"\"\"\n return run_subproc(cmds, captured='stdout')\n\n\ndef subproc_captured_inject(*cmds):\n \"\"\"Runs a subprocess, capturing the output. Returns a list of\n whitespace-separated strings of the stdout that was produced.\n The string is split using xonsh's lexer, rather than Python's str.split()\n or shlex.split().\n \"\"\"\n s = run_subproc(cmds, captured='stdout')\n toks = builtins.__xonsh_execer__.parser.lexer.split(s)\n return toks\n\n\ndef subproc_captured_object(*cmds):\n \"\"\"\n Runs a subprocess, capturing the output. Returns an instance of\n CommandPipeline representing the completed command.\n \"\"\"\n return run_subproc(cmds, captured='object')\n\n\ndef subproc_captured_hiddenobject(*cmds):\n \"\"\"Runs a subprocess, capturing the output. Returns an instance of\n HiddenCommandPipeline representing the completed command.\n \"\"\"\n return run_subproc(cmds, captured='hiddenobject')\n\n\ndef subproc_uncaptured(*cmds):\n \"\"\"Runs a subprocess, without capturing the output. 
Returns the stdout\n that was produced as a str.\n \"\"\"\n return run_subproc(cmds, captured=False)\n\n\ndef ensure_list_of_strs(x):\n \"\"\"Ensures that x is a list of strings.\"\"\"\n if isinstance(x, str):\n rtn = [x]\n elif isinstance(x, cabc.Sequence):\n rtn = [i if isinstance(i, str) else str(i) for i in x]\n else:\n rtn = [str(x)]\n return rtn\n\n\ndef list_of_strs_or_callables(x):\n \"\"\"Ensures that x is a list of strings or functions\"\"\"\n if isinstance(x, str) or callable(x):\n rtn = [x]\n elif isinstance(x, cabc.Iterable):\n rtn = [i if isinstance(i, str) or callable(i) else str(i) for i in x]\n else:\n rtn = [str(x)]\n return rtn\n\n\n@lazyobject\ndef MACRO_FLAG_KINDS():\n return {\n 's': str,\n 'str': str,\n 'string': str,\n 'a': AST,\n 'ast': AST,\n 'c': types.CodeType,\n 'code': types.CodeType,\n 'compile': types.CodeType,\n 'v': eval,\n 'eval': eval,\n 'x': exec,\n 'exec': exec,\n 't': type,\n 'type': type,\n }\n\n\ndef _convert_kind_flag(x):\n \"\"\"Puts a kind flag (string) a canonical form.\"\"\"\n x = x.lower()\n kind = MACRO_FLAG_KINDS.get(x, None)\n if kind is None:\n raise TypeError('{0!r} not a recognized macro type.'.format(x))\n return kind\n\n\ndef convert_macro_arg(raw_arg, kind, glbs, locs, *, name='<arg>',\n macroname='<macro>'):\n \"\"\"Converts a string macro argument based on the requested kind.\n\n Parameters\n ----------\n raw_arg : str\n The str reprensetaion of the macro argument.\n kind : object\n A flag or type representing how to convert the argument.\n glbs : Mapping\n The globals from the call site.\n locs : Mapping or None\n The locals from the call site.\n name : str, optional\n The macro argument name.\n macroname : str, optional\n The name of the macro itself.\n\n Returns\n -------\n The converted argument.\n \"\"\"\n # munge kind and mode to start\n mode = None\n if isinstance(kind, cabc.Sequence) and not isinstance(kind, str):\n # have (kind, mode) tuple\n kind, mode = kind\n if isinstance(kind, str):\n kind = _convert_kind_flag(kind)\n if kind is str or kind is None:\n return raw_arg # short circut since there is nothing else to do\n # select from kind and convert\n execer = builtins.__xonsh_execer__\n filename = macroname + '(' + name + ')'\n if kind is AST:\n ctx = set(dir(builtins)) | set(glbs.keys())\n if locs is not None:\n ctx |= set(locs.keys())\n mode = mode or 'eval'\n arg = execer.parse(raw_arg, ctx, mode=mode, filename=filename)\n elif kind is types.CodeType or kind is compile: # NOQA\n mode = mode or 'eval'\n arg = execer.compile(raw_arg, mode=mode, glbs=glbs, locs=locs,\n filename=filename)\n elif kind is eval:\n arg = execer.eval(raw_arg, glbs=glbs, locs=locs, filename=filename)\n elif kind is exec:\n mode = mode or 'exec'\n if not raw_arg.endswith('\\n'):\n raw_arg += '\\n'\n arg = execer.exec(raw_arg, mode=mode, glbs=glbs, locs=locs,\n filename=filename)\n elif kind is type:\n arg = type(execer.eval(raw_arg, glbs=glbs, locs=locs,\n filename=filename))\n else:\n msg = ('kind={0!r} and mode={1!r} was not recongnized for macro '\n 'argument {2!r}')\n raise TypeError(msg.format(kind, mode, name))\n return arg\n\n\[email protected]\ndef in_macro_call(f, glbs, locs):\n \"\"\"Attaches macro globals and locals temporarily to function as a\n context manager.\n\n Parameters\n ----------\n f : callable object\n The function that is called as ``f(*args)``.\n glbs : Mapping\n The globals from the call site.\n locs : Mapping or None\n The locals from the call site.\n \"\"\"\n prev_glbs = getattr(f, 'macro_globals', None)\n prev_locs = 
getattr(f, 'macro_locals', None)\n f.macro_globals = glbs\n f.macro_locals = locs\n yield\n if prev_glbs is None:\n del f.macro_globals\n else:\n f.macro_globals = prev_glbs\n if prev_locs is None:\n del f.macro_locals\n else:\n f.macro_locals = prev_locs\n\n\ndef call_macro(f, raw_args, glbs, locs):\n \"\"\"Calls a function as a macro, returning its result.\n\n Parameters\n ----------\n f : callable object\n The function that is called as ``f(*args)``.\n raw_args : tuple of str\n The str reprensetaion of arguments of that were passed into the\n macro. These strings will be parsed, compiled, evaled, or left as\n a string dependending on the annotations of f.\n glbs : Mapping\n The globals from the call site.\n locs : Mapping or None\n The locals from the call site.\n \"\"\"\n sig = inspect.signature(f)\n empty = inspect.Parameter.empty\n macroname = f.__name__\n i = 0\n args = []\n for (key, param), raw_arg in zip(sig.parameters.items(), raw_args):\n i += 1\n if raw_arg == '*':\n break\n kind = param.annotation\n if kind is empty or kind is None:\n kind = str\n arg = convert_macro_arg(raw_arg, kind, glbs, locs, name=key,\n macroname=macroname)\n args.append(arg)\n reg_args, kwargs = _eval_regular_args(raw_args[i:], glbs, locs)\n args += reg_args\n with in_macro_call(f, glbs, locs):\n rtn = f(*args, **kwargs)\n return rtn\n\n\n@lazyobject\ndef KWARG_RE():\n return re.compile('([A-Za-z_]\\w*=|\\*\\*)')\n\n\ndef _starts_as_arg(s):\n \"\"\"Tests if a string starts as a non-kwarg string would.\"\"\"\n return KWARG_RE.match(s) is None\n\n\ndef _eval_regular_args(raw_args, glbs, locs):\n if not raw_args:\n return [], {}\n arglist = list(itertools.takewhile(_starts_as_arg, raw_args))\n kwarglist = raw_args[len(arglist):]\n execer = builtins.__xonsh_execer__\n if not arglist:\n args = arglist\n kwargstr = 'dict({})'.format(', '.join(kwarglist))\n kwargs = execer.eval(kwargstr, glbs=glbs, locs=locs)\n elif not kwarglist:\n argstr = '({},)'.format(', '.join(arglist))\n args = execer.eval(argstr, glbs=glbs, locs=locs)\n kwargs = {}\n else:\n argstr = '({},)'.format(', '.join(arglist))\n kwargstr = 'dict({})'.format(', '.join(kwarglist))\n both = '({}, {})'.format(argstr, kwargstr)\n args, kwargs = execer.eval(both, glbs=glbs, locs=locs)\n return args, kwargs\n\n\ndef enter_macro(obj, raw_block, glbs, locs):\n \"\"\"Prepares to enter a context manager macro by attaching the contents\n of the macro block, globals, and locals to the object. 
These modifications\n are made in-place and the original object is returned.\n\n\n Parameters\n ----------\n obj : context manager\n The object that is about to be entered via a with-statement.\n raw_block : str\n The str of the block that is the context body.\n This string will be parsed, compiled, evaled, or left as\n a string dependending on the return annotation of obj.__enter__.\n glbs : Mapping\n The globals from the context site.\n locs : Mapping or None\n The locals from the context site.\n\n Returns\n -------\n obj : context manager\n The same context manager but with the new macro information applied.\n \"\"\"\n # recurse down sequences\n if isinstance(obj, cabc.Sequence):\n for x in obj:\n enter_macro(x, raw_block, glbs, locs)\n return obj\n # convert block as needed\n kind = getattr(obj, '__xonsh_block__', str)\n macroname = getattr(obj, '__name__', '<context>')\n block = convert_macro_arg(raw_block, kind, glbs, locs, name='<with!>',\n macroname=macroname)\n # attach attrs\n obj.macro_globals = glbs\n obj.macro_locals = locs\n obj.macro_block = block\n return obj\n\n\ndef load_builtins(execer=None, ctx=None):\n \"\"\"Loads the xonsh builtins into the Python builtins. Sets the\n BUILTINS_LOADED variable to True.\n \"\"\"\n global BUILTINS_LOADED\n # private built-ins\n builtins.__xonsh_config__ = {}\n builtins.__xonsh_env__ = Env(default_env())\n builtins.__xonsh_help__ = helper\n builtins.__xonsh_superhelp__ = superhelper\n builtins.__xonsh_pathsearch__ = pathsearch\n builtins.__xonsh_globsearch__ = globsearch\n builtins.__xonsh_regexsearch__ = regexsearch\n builtins.__xonsh_glob__ = globpath\n builtins.__xonsh_expand_path__ = expand_path\n builtins.__xonsh_exit__ = False\n builtins.__xonsh_stdout_uncaptured__ = None\n builtins.__xonsh_stderr_uncaptured__ = None\n if hasattr(builtins, 'exit'):\n builtins.__xonsh_pyexit__ = builtins.exit\n del builtins.exit\n if hasattr(builtins, 'quit'):\n builtins.__xonsh_pyquit__ = builtins.quit\n del builtins.quit\n builtins.__xonsh_subproc_captured_stdout__ = subproc_captured_stdout\n builtins.__xonsh_subproc_captured_inject__ = subproc_captured_inject\n builtins.__xonsh_subproc_captured_object__ = subproc_captured_object\n builtins.__xonsh_subproc_captured_hiddenobject__ = subproc_captured_hiddenobject\n builtins.__xonsh_subproc_uncaptured__ = subproc_uncaptured\n builtins.__xonsh_execer__ = execer\n builtins.__xonsh_commands_cache__ = CommandsCache()\n builtins.__xonsh_all_jobs__ = {}\n builtins.__xonsh_ensure_list_of_strs__ = ensure_list_of_strs\n builtins.__xonsh_list_of_strs_or_callables__ = list_of_strs_or_callables\n builtins.__xonsh_completers__ = xonsh.completers.init.default_completers()\n builtins.__xonsh_call_macro__ = call_macro\n builtins.__xonsh_enter_macro__ = enter_macro\n builtins.__xonsh_path_literal__ = path_literal\n # public built-ins\n builtins.XonshError = XonshError\n builtins.XonshCalledProcessError = XonshCalledProcessError\n builtins.evalx = None if execer is None else execer.eval\n builtins.execx = None if execer is None else execer.exec\n builtins.compilex = None if execer is None else execer.compile\n builtins.events = events\n\n # sneak the path search functions into the aliases\n # Need this inline/lazy import here since we use locate_binary that\n # relies on __xonsh_env__ in default aliases\n builtins.default_aliases = builtins.aliases = Aliases(make_default_aliases())\n builtins.__xonsh_history__ = None\n atexit.register(_lastflush)\n for sig in AT_EXIT_SIGNALS:\n resetting_signal_handle(sig, 
_lastflush)\n BUILTINS_LOADED = True\n\n\ndef _lastflush(s=None, f=None):\n if hasattr(builtins, '__xonsh_history__'):\n if builtins.__xonsh_history__ is not None:\n builtins.__xonsh_history__.flush(at_exit=True)\n\n\ndef unload_builtins():\n \"\"\"Removes the xonsh builtins from the Python builtins, if the\n BUILTINS_LOADED is True, sets BUILTINS_LOADED to False, and returns.\n \"\"\"\n global BUILTINS_LOADED\n env = getattr(builtins, '__xonsh_env__', None)\n if isinstance(env, Env):\n env.undo_replace_env()\n if hasattr(builtins, '__xonsh_pyexit__'):\n builtins.exit = builtins.__xonsh_pyexit__\n if hasattr(builtins, '__xonsh_pyquit__'):\n builtins.quit = builtins.__xonsh_pyquit__\n if not BUILTINS_LOADED:\n return\n names = ['__xonsh_config__',\n '__xonsh_env__',\n '__xonsh_ctx__',\n '__xonsh_help__',\n '__xonsh_superhelp__',\n '__xonsh_pathsearch__',\n '__xonsh_globsearch__',\n '__xonsh_regexsearch__',\n '__xonsh_glob__',\n '__xonsh_expand_path__',\n '__xonsh_exit__',\n '__xonsh_stdout_uncaptured__',\n '__xonsh_stderr_uncaptured__',\n '__xonsh_pyexit__',\n '__xonsh_pyquit__',\n '__xonsh_subproc_captured_stdout__',\n '__xonsh_subproc_captured_inject__',\n '__xonsh_subproc_captured_object__',\n '__xonsh_subproc_captured_hiddenobject__',\n '__xonsh_subproc_uncaptured__',\n '__xonsh_execer__',\n '__xonsh_commands_cache__',\n '__xonsh_completers__',\n '__xonsh_call_macro__',\n '__xonsh_enter_macro__',\n '__xonsh_path_literal__',\n 'XonshError',\n 'XonshCalledProcessError',\n 'evalx',\n 'execx',\n 'compilex',\n 'default_aliases',\n '__xonsh_all_jobs__',\n '__xonsh_ensure_list_of_strs__',\n '__xonsh_list_of_strs_or_callables__',\n '__xonsh_history__',\n ]\n for name in names:\n if hasattr(builtins, name):\n delattr(builtins, name)\n BUILTINS_LOADED = False\n\n\[email protected]\ndef xonsh_builtins(execer=None):\n \"\"\"A context manager for using the xonsh builtins only in a limited\n scope. Likely useful in testing.\n \"\"\"\n load_builtins(execer=execer)\n yield\n unload_builtins()\n", "path": "xonsh/built_ins.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"The xonsh built-ins.\n\nNote that this module is named 'built_ins' so as not to be confused with the\nspecial Python builtins module.\n\"\"\"\nimport io\nimport os\nimport re\nimport sys\nimport types\nimport shlex\nimport signal\nimport atexit\nimport pathlib\nimport inspect\nimport builtins\nimport itertools\nimport subprocess\nimport contextlib\nimport collections.abc as cabc\n\nfrom xonsh.ast import AST\nfrom xonsh.lazyasd import LazyObject, lazyobject\nfrom xonsh.inspectors import Inspector\nfrom xonsh.aliases import Aliases, make_default_aliases\nfrom xonsh.environ import Env, default_env, locate_binary\nfrom xonsh.jobs import add_job\nfrom xonsh.platform import ON_POSIX, ON_WINDOWS\nfrom xonsh.proc import (\n PopenThread, ProcProxyThread, ProcProxy, ConsoleParallelReader,\n pause_call_resume, CommandPipeline, HiddenCommandPipeline,\n STDOUT_CAPTURE_KINDS)\nfrom xonsh.tools import (\n suggest_commands, expand_path, globpath, XonshError,\n XonshCalledProcessError\n)\nfrom xonsh.lazyimps import pty, termios\nfrom xonsh.commands_cache import CommandsCache\nfrom xonsh.events import events\n\nimport xonsh.completers.init\n\nBUILTINS_LOADED = False\nINSPECTOR = LazyObject(Inspector, globals(), 'INSPECTOR')\n\n\n@lazyobject\ndef AT_EXIT_SIGNALS():\n sigs = (signal.SIGABRT, signal.SIGFPE, signal.SIGILL, signal.SIGSEGV,\n signal.SIGTERM)\n if ON_POSIX:\n sigs += (signal.SIGTSTP, signal.SIGQUIT, signal.SIGHUP)\n return sigs\n\n\ndef resetting_signal_handle(sig, f):\n \"\"\"Sets a new signal handle that will automatically restore the old value\n once the new handle is finished.\n \"\"\"\n oldh = signal.getsignal(sig)\n\n def newh(s=None, frame=None):\n f(s, frame)\n signal.signal(sig, oldh)\n if sig != 0:\n sys.exit(sig)\n signal.signal(sig, newh)\n\n\ndef helper(x, name=''):\n \"\"\"Prints help about, and then returns that variable.\"\"\"\n INSPECTOR.pinfo(x, oname=name, detail_level=0)\n return x\n\n\ndef superhelper(x, name=''):\n \"\"\"Prints help about, and then returns that variable.\"\"\"\n INSPECTOR.pinfo(x, oname=name, detail_level=1)\n return x\n\n\ndef reglob(path, parts=None, i=None):\n \"\"\"Regular expression-based globbing.\"\"\"\n if parts is None:\n path = os.path.normpath(path)\n drive, tail = os.path.splitdrive(path)\n parts = tail.split(os.sep)\n d = os.sep if os.path.isabs(path) else '.'\n d = os.path.join(drive, d)\n return reglob(d, parts, i=0)\n base = subdir = path\n if i == 0:\n if not os.path.isabs(base):\n base = ''\n elif len(parts) > 1:\n i += 1\n regex = os.path.join(base, parts[i])\n if ON_WINDOWS:\n # currently unable to access regex backslash sequences\n # on Windows due to paths using \\.\n regex = regex.replace('\\\\', '\\\\\\\\')\n regex = re.compile(regex)\n files = os.listdir(subdir)\n files.sort()\n paths = []\n i1 = i + 1\n if i1 == len(parts):\n for f in files:\n p = os.path.join(base, f)\n if regex.fullmatch(p) is not None:\n paths.append(p)\n else:\n for f in files:\n p = os.path.join(base, f)\n if regex.fullmatch(p) is None or not os.path.isdir(p):\n continue\n paths += reglob(p, parts=parts, i=i1)\n return paths\n\n\ndef path_literal(s):\n s = expand_path(s)\n return pathlib.Path(s)\n\n\ndef regexsearch(s):\n s = expand_path(s)\n return reglob(s)\n\n\ndef globsearch(s):\n csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS')\n glob_sorted = builtins.__xonsh_env__.get('GLOB_SORTED')\n return globpath(s, ignore_case=(not csc), return_empty=True,\n sort_result=glob_sorted)\n\n\ndef 
pathsearch(func, s, pymode=False, pathobj=False):\n \"\"\"\n Takes a string and returns a list of file paths that match (regex, glob,\n or arbitrary search function). If pathobj=True, the return is a list of\n pathlib.Path objects instead of strings.\n \"\"\"\n if (not callable(func) or\n len(inspect.signature(func).parameters) != 1):\n error = \"%r is not a known path search function\"\n raise XonshError(error % func)\n o = func(s)\n if pathobj and pymode:\n o = list(map(pathlib.Path, o))\n no_match = [] if pymode else [s]\n return o if len(o) != 0 else no_match\n\n\nRE_SHEBANG = LazyObject(lambda: re.compile(r'#![ \\t]*(.+?)$'),\n globals(), 'RE_SHEBANG')\n\n\ndef _is_binary(fname, limit=80):\n with open(fname, 'rb') as f:\n for i in range(limit):\n char = f.read(1)\n if char == b'\\0':\n return True\n if char == b'\\n':\n return False\n if char == b'':\n return False\n return False\n\n\ndef _un_shebang(x):\n if x == '/usr/bin/env':\n return []\n elif any(x.startswith(i) for i in ['/usr/bin', '/usr/local/bin', '/bin']):\n x = os.path.basename(x)\n elif x.endswith('python') or x.endswith('python.exe'):\n x = 'python'\n if x == 'xonsh':\n return ['python', '-m', 'xonsh.main']\n return [x]\n\n\ndef get_script_subproc_command(fname, args):\n \"\"\"Given the name of a script outside the path, returns a list representing\n an appropriate subprocess command to execute the script. Raises\n PermissionError if the script is not executable.\n \"\"\"\n # make sure file is executable\n if not os.access(fname, os.X_OK):\n raise PermissionError\n if ON_POSIX and not os.access(fname, os.R_OK):\n # on some systems, some importnat programs (e.g. sudo) will have\n # execute permissions but not read/write permisions. This enables\n # things with the SUID set to be run. 
Needs to come before _is_binary()\n # is called, because that function tries to read the file.\n return [fname] + args\n elif _is_binary(fname):\n # if the file is a binary, we should call it directly\n return [fname] + args\n if ON_WINDOWS:\n # Windows can execute various filetypes directly\n # as given in PATHEXT\n _, ext = os.path.splitext(fname)\n if ext.upper() in builtins.__xonsh_env__.get('PATHEXT'):\n return [fname] + args\n # find interpreter\n with open(fname, 'rb') as f:\n first_line = f.readline().decode().strip()\n m = RE_SHEBANG.match(first_line)\n # xonsh is the default interpreter\n if m is None:\n interp = ['xonsh']\n else:\n interp = m.group(1).strip()\n if len(interp) > 0:\n interp = shlex.split(interp)\n else:\n interp = ['xonsh']\n if ON_WINDOWS:\n o = []\n for i in interp:\n o.extend(_un_shebang(i))\n interp = o\n return interp + [fname] + args\n\n\n@lazyobject\ndef _REDIR_REGEX():\n name = \"(o(?:ut)?|e(?:rr)?|a(?:ll)?|&?\\d?)\"\n return re.compile(\"{r}(>?>|<){r}$\".format(r=name))\n\n\n_MODES = LazyObject(lambda: {'>>': 'a', '>': 'w', '<': 'r'}, globals(),\n '_MODES')\n_WRITE_MODES = LazyObject(lambda: frozenset({'w', 'a'}), globals(),\n '_WRITE_MODES')\n_REDIR_ALL = LazyObject(lambda: frozenset({'&', 'a', 'all'}),\n globals(), '_REDIR_ALL')\n_REDIR_ERR = LazyObject(lambda: frozenset({'2', 'e', 'err'}), globals(),\n '_REDIR_ERR')\n_REDIR_OUT = LazyObject(lambda: frozenset({'', '1', 'o', 'out'}), globals(),\n '_REDIR_OUT')\n_E2O_MAP = LazyObject(lambda: frozenset({'{}>{}'.format(e, o)\n for e in _REDIR_ERR\n for o in _REDIR_OUT\n if o != ''}), globals(), '_E2O_MAP')\n_O2E_MAP = LazyObject(lambda: frozenset({'{}>{}'.format(o, e)\n for e in _REDIR_ERR\n for o in _REDIR_OUT\n if o != ''}), globals(), '_O2E_MAP')\n\n\ndef _is_redirect(x):\n return isinstance(x, str) and _REDIR_REGEX.match(x)\n\n\ndef safe_open(fname, mode, buffering=-1):\n \"\"\"Safely attempts to open a file in for xonsh subprocs.\"\"\"\n # file descriptors\n try:\n return io.open(fname, mode, buffering=buffering)\n except PermissionError:\n raise XonshError('xonsh: {0}: permission denied'.format(fname))\n except FileNotFoundError:\n raise XonshError('xonsh: {0}: no such file or directory'.format(fname))\n except Exception:\n raise XonshError('xonsh: {0}: unable to open file'.format(fname))\n\n\ndef safe_close(x):\n \"\"\"Safely attempts to close an object.\"\"\"\n if not isinstance(x, io.IOBase):\n return\n if x.closed:\n return\n try:\n x.close()\n except Exception:\n pass\n\n\ndef _parse_redirects(r, loc=None):\n \"\"\"returns origin, mode, destination tuple\"\"\"\n orig, mode, dest = _REDIR_REGEX.match(r).groups()\n # redirect to fd\n if dest.startswith('&'):\n try:\n dest = int(dest[1:])\n if loc is None:\n loc, dest = dest, '' # NOQA\n else:\n e = 'Unrecognized redirection command: {}'.format(r)\n raise XonshError(e)\n except (ValueError, XonshError):\n raise\n except Exception:\n pass\n mode = _MODES.get(mode, None)\n if mode == 'r' and (len(orig) > 0 or len(dest) > 0):\n raise XonshError('Unrecognized redirection command: {}'.format(r))\n elif mode in _WRITE_MODES and len(dest) > 0:\n raise XonshError('Unrecognized redirection command: {}'.format(r))\n return orig, mode, dest\n\n\ndef _redirect_streams(r, loc=None):\n \"\"\"Returns stdin, stdout, stderr tuple of redirections.\"\"\"\n stdin = stdout = stderr = None\n no_ampersand = r.replace('&', '')\n # special case of redirecting stderr to stdout\n if no_ampersand in _E2O_MAP:\n stderr = subprocess.STDOUT\n return stdin, stdout, stderr\n elif 
no_ampersand in _O2E_MAP:\n stdout = 2 # using 2 as a flag, rather than using a file object\n return stdin, stdout, stderr\n # get streams\n orig, mode, dest = _parse_redirects(r)\n if mode == 'r':\n stdin = safe_open(loc, mode)\n elif mode in _WRITE_MODES:\n if orig in _REDIR_ALL:\n stdout = stderr = safe_open(loc, mode)\n elif orig in _REDIR_OUT:\n stdout = safe_open(loc, mode)\n elif orig in _REDIR_ERR:\n stderr = safe_open(loc, mode)\n else:\n raise XonshError('Unrecognized redirection command: {}'.format(r))\n else:\n raise XonshError('Unrecognized redirection command: {}'.format(r))\n return stdin, stdout, stderr\n\n\ndef default_signal_pauser(n, f):\n \"\"\"Pauses a signal, as needed.\"\"\"\n signal.pause()\n\n\ndef no_pg_xonsh_preexec_fn():\n \"\"\"Default subprocess preexec function for when there is no existing\n pipeline group.\n \"\"\"\n os.setpgrp()\n signal.signal(signal.SIGTSTP, default_signal_pauser)\n\n\nclass SubprocSpec:\n \"\"\"A container for specifiying how a subprocess command should be\n executed.\n \"\"\"\n\n kwnames = ('stdin', 'stdout', 'stderr', 'universal_newlines')\n\n def __init__(self, cmd, *, cls=subprocess.Popen, stdin=None, stdout=None,\n stderr=None, universal_newlines=False, captured=False):\n \"\"\"\n Parameters\n ----------\n cmd : list of str\n Command to be run.\n cls : Popen-like\n Class to run the subprocess with.\n stdin : file-like\n Popen file descriptor or flag for stdin.\n stdout : file-like\n Popen file descriptor or flag for stdout.\n stderr : file-like\n Popen file descriptor or flag for stderr.\n universal_newlines : bool\n Whether or not to use universal newlines.\n captured : bool or str, optional\n The flag for if the subprocess is captured, may be one of:\n False for $[], 'stdout' for $(), 'hiddenobject' for ![], or\n 'object' for !().\n\n Attributes\n ----------\n args : list of str\n Arguments as originally supplied.\n alias : list of str, callable, or None\n The alias that was reolved for this command, if any.\n binary_loc : str or None\n Path to binary to execute.\n is_proxy : bool\n Whether or not the subprocess is or should be run as a proxy.\n background : bool\n Whether or not the subprocess should be started in the background.\n threadable : bool\n Whether or not the subprocess is able to be run in a background\n thread, rather than the main thread.\n last_in_pipeline : bool\n Whether the subprocess is the last in the execution pipeline.\n captured_stdout : file-like\n Handle to captured stdin\n captured_stderr : file-like\n Handle to captured stderr\n \"\"\"\n self._stdin = self._stdout = self._stderr = None\n # args\n self.cmd = list(cmd)\n self.cls = cls\n self.stdin = stdin\n self.stdout = stdout\n self.stderr = stderr\n self.universal_newlines = universal_newlines\n self.captured = captured\n # pure attrs\n self.args = list(cmd)\n self.alias = None\n self.binary_loc = None\n self.is_proxy = False\n self.background = False\n self.threadable = True\n self.last_in_pipeline = False\n self.captured_stdout = None\n self.captured_stderr = None\n\n def __str__(self):\n s = self.__class__.__name__ + '(' + str(self.cmd) + ', '\n s += self.cls.__name__ + ', '\n kws = [n + '=' + str(getattr(self, n)) for n in self.kwnames]\n s += ', '.join(kws) + ')'\n return s\n\n def __repr__(self):\n s = self.__class__.__name__ + '(' + repr(self.cmd) + ', '\n s += self.cls.__name__ + ', '\n kws = [n + '=' + repr(getattr(self, n)) for n in self.kwnames]\n s += ', '.join(kws) + ')'\n return s\n\n #\n # Properties\n #\n\n @property\n def 
stdin(self):\n return self._stdin\n\n @stdin.setter\n def stdin(self, value):\n if self._stdin is None:\n self._stdin = value\n elif value is None:\n pass\n else:\n safe_close(value)\n msg = 'Multiple inputs for stdin for {0!r}'\n msg = msg.format(' '.join(self.args))\n raise XonshError(msg)\n\n @property\n def stdout(self):\n return self._stdout\n\n @stdout.setter\n def stdout(self, value):\n if self._stdout is None:\n self._stdout = value\n elif value is None:\n pass\n else:\n safe_close(value)\n msg = 'Multiple redirections for stdout for {0!r}'\n msg = msg.format(' '.join(self.args))\n raise XonshError(msg)\n\n @property\n def stderr(self):\n return self._stderr\n\n @stderr.setter\n def stderr(self, value):\n if self._stderr is None:\n self._stderr = value\n elif value is None:\n pass\n else:\n safe_close(value)\n msg = 'Multiple redirections for stderr for {0!r}'\n msg = msg.format(' '.join(self.args))\n raise XonshError(msg)\n\n #\n # Execution methods\n #\n\n def run(self, *, pipeline_group=None):\n \"\"\"Launches the subprocess and returns the object.\"\"\"\n kwargs = {n: getattr(self, n) for n in self.kwnames}\n self.prep_env(kwargs)\n self.prep_preexec_fn(kwargs, pipeline_group=pipeline_group)\n if callable(self.alias):\n if 'preexec_fn' in kwargs:\n kwargs.pop('preexec_fn')\n p = self.cls(self.alias, self.cmd, **kwargs)\n else:\n self._fix_null_cmd_bytes()\n p = self._run_binary(kwargs)\n p.spec = self\n p.last_in_pipeline = self.last_in_pipeline\n p.captured_stdout = self.captured_stdout\n p.captured_stderr = self.captured_stderr\n return p\n\n def _run_binary(self, kwargs):\n try:\n bufsize = 1\n p = self.cls(self.cmd, bufsize=bufsize, **kwargs)\n except PermissionError:\n e = 'xonsh: subprocess mode: permission denied: {0}'\n raise XonshError(e.format(self.cmd[0]))\n except FileNotFoundError:\n cmd0 = self.cmd[0]\n e = 'xonsh: subprocess mode: command not found: {0}'.format(cmd0)\n env = builtins.__xonsh_env__\n sug = suggest_commands(cmd0, env, builtins.aliases)\n if len(sug.strip()) > 0:\n e += '\\n' + suggest_commands(cmd0, env, builtins.aliases)\n raise XonshError(e)\n return p\n\n def prep_env(self, kwargs):\n \"\"\"Prepares the environment to use in the subprocess.\"\"\"\n denv = builtins.__xonsh_env__.detype()\n if ON_WINDOWS:\n # Over write prompt variable as xonsh's $PROMPT does\n # not make much sense for other subprocs\n denv['PROMPT'] = '$P$G'\n kwargs['env'] = denv\n\n def prep_preexec_fn(self, kwargs, pipeline_group=None):\n \"\"\"Prepares the 'preexec_fn' keyword argument\"\"\"\n if not ON_POSIX:\n return\n if not builtins.__xonsh_env__.get('XONSH_INTERACTIVE'):\n return\n if pipeline_group is None:\n xonsh_preexec_fn = no_pg_xonsh_preexec_fn\n else:\n def xonsh_preexec_fn():\n \"\"\"Preexec function bound to a pipeline group.\"\"\"\n os.setpgid(0, pipeline_group)\n signal.signal(signal.SIGTSTP, default_signal_pauser)\n kwargs['preexec_fn'] = xonsh_preexec_fn\n\n def _fix_null_cmd_bytes(self):\n # Popen does not accept null bytes in its input commands.\n # that doesn;t stop some subproces from using them. 
Here we\n # escape them just in case.\n cmd = self.cmd\n for i in range(len(cmd)):\n cmd[i] = cmd[i].replace('\\0', '\\\\0')\n\n #\n # Building methods\n #\n\n @classmethod\n def build(kls, cmd, *, cls=subprocess.Popen, **kwargs):\n \"\"\"Creates an instance of the subprocess command, with any\n modifcations and adjustments based on the actual cmd that\n was recieved.\n \"\"\"\n # modifications that do not alter cmds may come before creating instance\n spec = kls(cmd, cls=cls, **kwargs)\n # modifications that alter cmds must come after creating instance\n # perform initial redirects\n spec.redirect_leading()\n spec.redirect_trailing()\n # apply aliases\n spec.resolve_alias()\n spec.resolve_binary_loc()\n spec.resolve_auto_cd()\n spec.resolve_executable_commands()\n spec.resolve_alias_cls()\n return spec\n\n def redirect_leading(self):\n \"\"\"Manage leading redirects such as with '< input.txt COMMAND'. \"\"\"\n while len(self.cmd) >= 3 and self.cmd[0] == '<':\n self.stdin = safe_open(self.cmd[1], 'r')\n self.cmd = self.cmd[2:]\n\n def redirect_trailing(self):\n \"\"\"Manages trailing redirects.\"\"\"\n while True:\n cmd = self.cmd\n if len(cmd) >= 3 and _is_redirect(cmd[-2]):\n streams = _redirect_streams(cmd[-2], cmd[-1])\n self.stdin, self.stdout, self.stderr = streams\n self.cmd = cmd[:-2]\n elif len(cmd) >= 2 and _is_redirect(cmd[-1]):\n streams = _redirect_streams(cmd[-1])\n self.stdin, self.stdout, self.stderr = streams\n self.cmd = cmd[:-1]\n else:\n break\n\n def resolve_alias(self):\n \"\"\"Sets alias in command, if applicable.\"\"\"\n cmd0 = self.cmd[0]\n if callable(cmd0):\n alias = cmd0\n else:\n alias = builtins.aliases.get(cmd0, None)\n self.alias = alias\n\n def resolve_binary_loc(self):\n \"\"\"Sets the binary location\"\"\"\n alias = self.alias\n if alias is None:\n binary_loc = locate_binary(self.cmd[0])\n elif callable(alias):\n binary_loc = None\n else:\n binary_loc = locate_binary(alias[0])\n self.binary_loc = binary_loc\n\n def resolve_auto_cd(self):\n \"\"\"Implements AUTO_CD functionality.\"\"\"\n if not (self.alias is None and\n self.binary_loc is None and\n len(self.cmd) == 1 and\n builtins.__xonsh_env__.get('AUTO_CD') and\n os.path.isdir(self.cmd[0])):\n return\n self.cmd.insert(0, 'cd')\n self.alias = builtins.aliases.get('cd', None)\n\n def resolve_executable_commands(self):\n \"\"\"Resolve command executables, if applicable.\"\"\"\n alias = self.alias\n if alias is None:\n pass\n elif callable(alias):\n self.cmd.pop(0)\n return\n else:\n self.cmd = alias + self.cmd[1:]\n # resolve any redirects the aliases may have applied\n self.redirect_leading()\n self.redirect_trailing()\n if self.binary_loc is None:\n return\n try:\n self.cmd = get_script_subproc_command(self.binary_loc, self.cmd[1:])\n except PermissionError:\n e = 'xonsh: subprocess mode: permission denied: {0}'\n raise XonshError(e.format(self.cmd[0]))\n\n def resolve_alias_cls(self):\n \"\"\"Determine which proxy class to run an alias with.\"\"\"\n alias = self.alias\n if not callable(alias):\n return\n self.is_proxy = True\n thable = getattr(alias, '__xonsh_threadable__', True)\n cls = ProcProxyThread if thable else ProcProxy\n self.cls = cls\n self.threadable = thable\n # also check capturablity, while we are here\n cpable = getattr(alias, '__xonsh_capturable__', self.captured)\n self.captured = cpable\n\n\ndef _safe_pipe_properties(fd, use_tty=False):\n \"\"\"Makes sure that a pipe file descriptor properties are sane.\"\"\"\n if not use_tty:\n return\n # due to some weird, long standing issue in 
Python, PTYs come out\n # replacing newline \\n with \\r\\n. This causes issues for raw unix\n # protocols, like git and ssh, which expect unix line endings.\n # see https://mail.python.org/pipermail/python-list/2013-June/650460.html\n # for more details and the following solution.\n props = termios.tcgetattr(fd)\n props[1] = props[1] & (~termios.ONLCR) | termios.ONLRET\n termios.tcsetattr(fd, termios.TCSANOW, props)\n\n\ndef _update_last_spec(last):\n captured = last.captured\n last.last_in_pipeline = True\n if not captured:\n return\n callable_alias = callable(last.alias)\n if callable_alias:\n pass\n else:\n cmds_cache = builtins.__xonsh_commands_cache__\n thable = (cmds_cache.predict_threadable(last.args) and\n cmds_cache.predict_threadable(last.cmd))\n if captured and thable:\n last.cls = PopenThread\n elif not thable:\n # foreground processes should use Popen\n last.threadable = False\n if captured == 'object' or captured == 'hiddenobject':\n # CommandPipeline objects should not pipe stdout, stderr\n return\n # cannot used PTY pipes for aliases, for some dark reason,\n # and must use normal pipes instead.\n use_tty = ON_POSIX and not callable_alias\n # Do not set standard in! Popen is not a fan of redirections here\n # set standard out\n if last.stdout is not None:\n last.universal_newlines = True\n elif captured in STDOUT_CAPTURE_KINDS:\n last.universal_newlines = False\n r, w = os.pipe()\n last.stdout = safe_open(w, 'wb')\n last.captured_stdout = safe_open(r, 'rb')\n elif builtins.__xonsh_stdout_uncaptured__ is not None:\n last.universal_newlines = True\n last.stdout = builtins.__xonsh_stdout_uncaptured__\n last.captured_stdout = last.stdout\n elif ON_WINDOWS and not callable_alias:\n last.universal_newlines = True\n last.stdout = None # must truly stream on windows\n last.captured_stdout = ConsoleParallelReader(1)\n else:\n last.universal_newlines = True\n r, w = pty.openpty() if use_tty else os.pipe()\n _safe_pipe_properties(w, use_tty=use_tty)\n last.stdout = safe_open(w, 'w')\n _safe_pipe_properties(r, use_tty=use_tty)\n last.captured_stdout = safe_open(r, 'r')\n # set standard error\n if last.stderr is not None:\n pass\n elif captured == 'object':\n r, w = os.pipe()\n last.stderr = safe_open(w, 'w')\n last.captured_stderr = safe_open(r, 'r')\n elif builtins.__xonsh_stderr_uncaptured__ is not None:\n last.stderr = builtins.__xonsh_stderr_uncaptured__\n last.captured_stderr = last.stderr\n elif ON_WINDOWS and not callable_alias:\n last.universal_newlines = True\n last.stderr = None # must truly stream on windows\n else:\n r, w = pty.openpty() if use_tty else os.pipe()\n _safe_pipe_properties(w, use_tty=use_tty)\n last.stderr = safe_open(w, 'w')\n _safe_pipe_properties(r, use_tty=use_tty)\n last.captured_stderr = safe_open(r, 'r')\n # redirect stdout to stderr, if we should\n if isinstance(last.stdout, int) and last.stdout == 2:\n # need to use private interface to avoid duplication.\n last._stdout = last.stderr\n\n\ndef cmds_to_specs(cmds, captured=False):\n \"\"\"Converts a list of cmds to a list of SubprocSpec objects that are\n ready to be executed.\n \"\"\"\n # first build the subprocs independently and separate from the redirects\n specs = []\n redirects = []\n for cmd in cmds:\n if isinstance(cmd, str):\n redirects.append(cmd)\n else:\n if cmd[-1] == '&':\n cmd = cmd[:-1]\n redirects.append('&')\n spec = SubprocSpec.build(cmd, captured=captured)\n specs.append(spec)\n # now modify the subprocs based on the redirects.\n for i, redirect in enumerate(redirects):\n if redirect 
== '|':\n # these should remain integer file descriptors, and not Python\n # file objects since they connect processes.\n r, w = os.pipe()\n specs[i].stdout = w\n specs[i + 1].stdin = r\n elif redirect == '&' and i == len(redirects) - 1:\n specs[-1].background = True\n else:\n raise XonshError('unrecognized redirect {0!r}'.format(redirect))\n # Apply boundry conditions\n _update_last_spec(specs[-1])\n return specs\n\n\ndef _should_set_title(captured=False):\n env = builtins.__xonsh_env__\n return (env.get('XONSH_INTERACTIVE') and\n not env.get('XONSH_STORE_STDOUT') and\n captured not in STDOUT_CAPTURE_KINDS and\n hasattr(builtins, '__xonsh_shell__'))\n\n\ndef run_subproc(cmds, captured=False):\n \"\"\"Runs a subprocess, in its many forms. This takes a list of 'commands,'\n which may be a list of command line arguments or a string, representing\n a special connecting character. For example::\n\n $ ls | grep wakka\n\n is represented by the following cmds::\n\n [['ls'], '|', ['grep', 'wakka']]\n\n Lastly, the captured argument affects only the last real command.\n \"\"\"\n specs = cmds_to_specs(cmds, captured=captured)\n captured = specs[-1].captured\n if captured == 'hiddenobject':\n command = HiddenCommandPipeline(specs)\n else:\n command = CommandPipeline(specs)\n proc = command.proc\n background = command.spec.background\n if not all(x.is_proxy for x in specs):\n add_job({\n 'cmds': cmds,\n 'pids': [i.pid for i in command.procs],\n 'obj': proc,\n 'bg': background,\n 'pipeline': command,\n 'pgrp': command.term_pgid,\n })\n if _should_set_title(captured=captured):\n # set title here to get currently executing command\n pause_call_resume(proc, builtins.__xonsh_shell__.settitle)\n # create command or return if backgrounding.\n if background:\n return\n # now figure out what we should return.\n if captured == 'stdout':\n command.end()\n return command.output\n elif captured == 'object':\n return command\n elif captured == 'hiddenobject':\n command.end()\n return command\n else:\n command.end()\n return\n\n\ndef subproc_captured_stdout(*cmds):\n \"\"\"Runs a subprocess, capturing the output. Returns the stdout\n that was produced as a str.\n \"\"\"\n return run_subproc(cmds, captured='stdout')\n\n\ndef subproc_captured_inject(*cmds):\n \"\"\"Runs a subprocess, capturing the output. Returns a list of\n whitespace-separated strings of the stdout that was produced.\n The string is split using xonsh's lexer, rather than Python's str.split()\n or shlex.split().\n \"\"\"\n s = run_subproc(cmds, captured='stdout')\n toks = builtins.__xonsh_execer__.parser.lexer.split(s.strip())\n return toks\n\n\ndef subproc_captured_object(*cmds):\n \"\"\"\n Runs a subprocess, capturing the output. Returns an instance of\n CommandPipeline representing the completed command.\n \"\"\"\n return run_subproc(cmds, captured='object')\n\n\ndef subproc_captured_hiddenobject(*cmds):\n \"\"\"Runs a subprocess, capturing the output. Returns an instance of\n HiddenCommandPipeline representing the completed command.\n \"\"\"\n return run_subproc(cmds, captured='hiddenobject')\n\n\ndef subproc_uncaptured(*cmds):\n \"\"\"Runs a subprocess, without capturing the output. 
Returns the stdout\n that was produced as a str.\n \"\"\"\n return run_subproc(cmds, captured=False)\n\n\ndef ensure_list_of_strs(x):\n \"\"\"Ensures that x is a list of strings.\"\"\"\n if isinstance(x, str):\n rtn = [x]\n elif isinstance(x, cabc.Sequence):\n rtn = [i if isinstance(i, str) else str(i) for i in x]\n else:\n rtn = [str(x)]\n return rtn\n\n\ndef list_of_strs_or_callables(x):\n \"\"\"Ensures that x is a list of strings or functions\"\"\"\n if isinstance(x, str) or callable(x):\n rtn = [x]\n elif isinstance(x, cabc.Iterable):\n rtn = [i if isinstance(i, str) or callable(i) else str(i) for i in x]\n else:\n rtn = [str(x)]\n return rtn\n\n\n@lazyobject\ndef MACRO_FLAG_KINDS():\n return {\n 's': str,\n 'str': str,\n 'string': str,\n 'a': AST,\n 'ast': AST,\n 'c': types.CodeType,\n 'code': types.CodeType,\n 'compile': types.CodeType,\n 'v': eval,\n 'eval': eval,\n 'x': exec,\n 'exec': exec,\n 't': type,\n 'type': type,\n }\n\n\ndef _convert_kind_flag(x):\n \"\"\"Puts a kind flag (string) a canonical form.\"\"\"\n x = x.lower()\n kind = MACRO_FLAG_KINDS.get(x, None)\n if kind is None:\n raise TypeError('{0!r} not a recognized macro type.'.format(x))\n return kind\n\n\ndef convert_macro_arg(raw_arg, kind, glbs, locs, *, name='<arg>',\n macroname='<macro>'):\n \"\"\"Converts a string macro argument based on the requested kind.\n\n Parameters\n ----------\n raw_arg : str\n The str reprensetaion of the macro argument.\n kind : object\n A flag or type representing how to convert the argument.\n glbs : Mapping\n The globals from the call site.\n locs : Mapping or None\n The locals from the call site.\n name : str, optional\n The macro argument name.\n macroname : str, optional\n The name of the macro itself.\n\n Returns\n -------\n The converted argument.\n \"\"\"\n # munge kind and mode to start\n mode = None\n if isinstance(kind, cabc.Sequence) and not isinstance(kind, str):\n # have (kind, mode) tuple\n kind, mode = kind\n if isinstance(kind, str):\n kind = _convert_kind_flag(kind)\n if kind is str or kind is None:\n return raw_arg # short circut since there is nothing else to do\n # select from kind and convert\n execer = builtins.__xonsh_execer__\n filename = macroname + '(' + name + ')'\n if kind is AST:\n ctx = set(dir(builtins)) | set(glbs.keys())\n if locs is not None:\n ctx |= set(locs.keys())\n mode = mode or 'eval'\n arg = execer.parse(raw_arg, ctx, mode=mode, filename=filename)\n elif kind is types.CodeType or kind is compile: # NOQA\n mode = mode or 'eval'\n arg = execer.compile(raw_arg, mode=mode, glbs=glbs, locs=locs,\n filename=filename)\n elif kind is eval:\n arg = execer.eval(raw_arg, glbs=glbs, locs=locs, filename=filename)\n elif kind is exec:\n mode = mode or 'exec'\n if not raw_arg.endswith('\\n'):\n raw_arg += '\\n'\n arg = execer.exec(raw_arg, mode=mode, glbs=glbs, locs=locs,\n filename=filename)\n elif kind is type:\n arg = type(execer.eval(raw_arg, glbs=glbs, locs=locs,\n filename=filename))\n else:\n msg = ('kind={0!r} and mode={1!r} was not recongnized for macro '\n 'argument {2!r}')\n raise TypeError(msg.format(kind, mode, name))\n return arg\n\n\[email protected]\ndef in_macro_call(f, glbs, locs):\n \"\"\"Attaches macro globals and locals temporarily to function as a\n context manager.\n\n Parameters\n ----------\n f : callable object\n The function that is called as ``f(*args)``.\n glbs : Mapping\n The globals from the call site.\n locs : Mapping or None\n The locals from the call site.\n \"\"\"\n prev_glbs = getattr(f, 'macro_globals', None)\n prev_locs = 
getattr(f, 'macro_locals', None)\n f.macro_globals = glbs\n f.macro_locals = locs\n yield\n if prev_glbs is None:\n del f.macro_globals\n else:\n f.macro_globals = prev_glbs\n if prev_locs is None:\n del f.macro_locals\n else:\n f.macro_locals = prev_locs\n\n\ndef call_macro(f, raw_args, glbs, locs):\n \"\"\"Calls a function as a macro, returning its result.\n\n Parameters\n ----------\n f : callable object\n The function that is called as ``f(*args)``.\n raw_args : tuple of str\n The str reprensetaion of arguments of that were passed into the\n macro. These strings will be parsed, compiled, evaled, or left as\n a string dependending on the annotations of f.\n glbs : Mapping\n The globals from the call site.\n locs : Mapping or None\n The locals from the call site.\n \"\"\"\n sig = inspect.signature(f)\n empty = inspect.Parameter.empty\n macroname = f.__name__\n i = 0\n args = []\n for (key, param), raw_arg in zip(sig.parameters.items(), raw_args):\n i += 1\n if raw_arg == '*':\n break\n kind = param.annotation\n if kind is empty or kind is None:\n kind = str\n arg = convert_macro_arg(raw_arg, kind, glbs, locs, name=key,\n macroname=macroname)\n args.append(arg)\n reg_args, kwargs = _eval_regular_args(raw_args[i:], glbs, locs)\n args += reg_args\n with in_macro_call(f, glbs, locs):\n rtn = f(*args, **kwargs)\n return rtn\n\n\n@lazyobject\ndef KWARG_RE():\n return re.compile('([A-Za-z_]\\w*=|\\*\\*)')\n\n\ndef _starts_as_arg(s):\n \"\"\"Tests if a string starts as a non-kwarg string would.\"\"\"\n return KWARG_RE.match(s) is None\n\n\ndef _eval_regular_args(raw_args, glbs, locs):\n if not raw_args:\n return [], {}\n arglist = list(itertools.takewhile(_starts_as_arg, raw_args))\n kwarglist = raw_args[len(arglist):]\n execer = builtins.__xonsh_execer__\n if not arglist:\n args = arglist\n kwargstr = 'dict({})'.format(', '.join(kwarglist))\n kwargs = execer.eval(kwargstr, glbs=glbs, locs=locs)\n elif not kwarglist:\n argstr = '({},)'.format(', '.join(arglist))\n args = execer.eval(argstr, glbs=glbs, locs=locs)\n kwargs = {}\n else:\n argstr = '({},)'.format(', '.join(arglist))\n kwargstr = 'dict({})'.format(', '.join(kwarglist))\n both = '({}, {})'.format(argstr, kwargstr)\n args, kwargs = execer.eval(both, glbs=glbs, locs=locs)\n return args, kwargs\n\n\ndef enter_macro(obj, raw_block, glbs, locs):\n \"\"\"Prepares to enter a context manager macro by attaching the contents\n of the macro block, globals, and locals to the object. 
These modifications\n are made in-place and the original object is returned.\n\n\n Parameters\n ----------\n obj : context manager\n The object that is about to be entered via a with-statement.\n raw_block : str\n The str of the block that is the context body.\n This string will be parsed, compiled, evaled, or left as\n a string dependending on the return annotation of obj.__enter__.\n glbs : Mapping\n The globals from the context site.\n locs : Mapping or None\n The locals from the context site.\n\n Returns\n -------\n obj : context manager\n The same context manager but with the new macro information applied.\n \"\"\"\n # recurse down sequences\n if isinstance(obj, cabc.Sequence):\n for x in obj:\n enter_macro(x, raw_block, glbs, locs)\n return obj\n # convert block as needed\n kind = getattr(obj, '__xonsh_block__', str)\n macroname = getattr(obj, '__name__', '<context>')\n block = convert_macro_arg(raw_block, kind, glbs, locs, name='<with!>',\n macroname=macroname)\n # attach attrs\n obj.macro_globals = glbs\n obj.macro_locals = locs\n obj.macro_block = block\n return obj\n\n\ndef load_builtins(execer=None, ctx=None):\n \"\"\"Loads the xonsh builtins into the Python builtins. Sets the\n BUILTINS_LOADED variable to True.\n \"\"\"\n global BUILTINS_LOADED\n # private built-ins\n builtins.__xonsh_config__ = {}\n builtins.__xonsh_env__ = Env(default_env())\n builtins.__xonsh_help__ = helper\n builtins.__xonsh_superhelp__ = superhelper\n builtins.__xonsh_pathsearch__ = pathsearch\n builtins.__xonsh_globsearch__ = globsearch\n builtins.__xonsh_regexsearch__ = regexsearch\n builtins.__xonsh_glob__ = globpath\n builtins.__xonsh_expand_path__ = expand_path\n builtins.__xonsh_exit__ = False\n builtins.__xonsh_stdout_uncaptured__ = None\n builtins.__xonsh_stderr_uncaptured__ = None\n if hasattr(builtins, 'exit'):\n builtins.__xonsh_pyexit__ = builtins.exit\n del builtins.exit\n if hasattr(builtins, 'quit'):\n builtins.__xonsh_pyquit__ = builtins.quit\n del builtins.quit\n builtins.__xonsh_subproc_captured_stdout__ = subproc_captured_stdout\n builtins.__xonsh_subproc_captured_inject__ = subproc_captured_inject\n builtins.__xonsh_subproc_captured_object__ = subproc_captured_object\n builtins.__xonsh_subproc_captured_hiddenobject__ = subproc_captured_hiddenobject\n builtins.__xonsh_subproc_uncaptured__ = subproc_uncaptured\n builtins.__xonsh_execer__ = execer\n builtins.__xonsh_commands_cache__ = CommandsCache()\n builtins.__xonsh_all_jobs__ = {}\n builtins.__xonsh_ensure_list_of_strs__ = ensure_list_of_strs\n builtins.__xonsh_list_of_strs_or_callables__ = list_of_strs_or_callables\n builtins.__xonsh_completers__ = xonsh.completers.init.default_completers()\n builtins.__xonsh_call_macro__ = call_macro\n builtins.__xonsh_enter_macro__ = enter_macro\n builtins.__xonsh_path_literal__ = path_literal\n # public built-ins\n builtins.XonshError = XonshError\n builtins.XonshCalledProcessError = XonshCalledProcessError\n builtins.evalx = None if execer is None else execer.eval\n builtins.execx = None if execer is None else execer.exec\n builtins.compilex = None if execer is None else execer.compile\n builtins.events = events\n\n # sneak the path search functions into the aliases\n # Need this inline/lazy import here since we use locate_binary that\n # relies on __xonsh_env__ in default aliases\n builtins.default_aliases = builtins.aliases = Aliases(make_default_aliases())\n builtins.__xonsh_history__ = None\n atexit.register(_lastflush)\n for sig in AT_EXIT_SIGNALS:\n resetting_signal_handle(sig, 
_lastflush)\n BUILTINS_LOADED = True\n\n\ndef _lastflush(s=None, f=None):\n if hasattr(builtins, '__xonsh_history__'):\n if builtins.__xonsh_history__ is not None:\n builtins.__xonsh_history__.flush(at_exit=True)\n\n\ndef unload_builtins():\n \"\"\"Removes the xonsh builtins from the Python builtins, if the\n BUILTINS_LOADED is True, sets BUILTINS_LOADED to False, and returns.\n \"\"\"\n global BUILTINS_LOADED\n env = getattr(builtins, '__xonsh_env__', None)\n if isinstance(env, Env):\n env.undo_replace_env()\n if hasattr(builtins, '__xonsh_pyexit__'):\n builtins.exit = builtins.__xonsh_pyexit__\n if hasattr(builtins, '__xonsh_pyquit__'):\n builtins.quit = builtins.__xonsh_pyquit__\n if not BUILTINS_LOADED:\n return\n names = ['__xonsh_config__',\n '__xonsh_env__',\n '__xonsh_ctx__',\n '__xonsh_help__',\n '__xonsh_superhelp__',\n '__xonsh_pathsearch__',\n '__xonsh_globsearch__',\n '__xonsh_regexsearch__',\n '__xonsh_glob__',\n '__xonsh_expand_path__',\n '__xonsh_exit__',\n '__xonsh_stdout_uncaptured__',\n '__xonsh_stderr_uncaptured__',\n '__xonsh_pyexit__',\n '__xonsh_pyquit__',\n '__xonsh_subproc_captured_stdout__',\n '__xonsh_subproc_captured_inject__',\n '__xonsh_subproc_captured_object__',\n '__xonsh_subproc_captured_hiddenobject__',\n '__xonsh_subproc_uncaptured__',\n '__xonsh_execer__',\n '__xonsh_commands_cache__',\n '__xonsh_completers__',\n '__xonsh_call_macro__',\n '__xonsh_enter_macro__',\n '__xonsh_path_literal__',\n 'XonshError',\n 'XonshCalledProcessError',\n 'evalx',\n 'execx',\n 'compilex',\n 'default_aliases',\n '__xonsh_all_jobs__',\n '__xonsh_ensure_list_of_strs__',\n '__xonsh_list_of_strs_or_callables__',\n '__xonsh_history__',\n ]\n for name in names:\n if hasattr(builtins, name):\n delattr(builtins, name)\n BUILTINS_LOADED = False\n\n\[email protected]\ndef xonsh_builtins(execer=None):\n \"\"\"A context manager for using the xonsh builtins only in a limited\n scope. Likely useful in testing.\n \"\"\"\n load_builtins(execer=execer)\n yield\n unload_builtins()\n", "path": "xonsh/built_ins.py" } ]
diff --git a/news/fix_at_dollar.rst b/news/fix_at_dollar.rst new file mode 100644 index 0000000000..ddfc6c05ba --- /dev/null +++ b/news/fix_at_dollar.rst @@ -0,0 +1,13 @@ +**Added:** None + +**Changed:** None + +**Deprecated:** None + +**Removed:** None + +**Fixed:** + +* ``@$`` operator now functions properly when returned command is an alias + +**Security:** None diff --git a/tests/test_integrations.py b/tests/test_integrations.py index c9c90f7b54..9967584695 100644 --- a/tests/test_integrations.py +++ b/tests/test_integrations.py @@ -217,6 +217,14 @@ def _echo(args): echo --option1 \ --option2 """, '--option1 --option2\n', 0), +# +# test @$() with aliases +# +(""" +aliases['ls'] = 'spam spam sausage spam' + +echo @$(which ls) +""", 'spam spam sausage spam\n', 0), ] diff --git a/xonsh/built_ins.py b/xonsh/built_ins.py index cc06e47ff8..6e75f93b82 100644 --- a/xonsh/built_ins.py +++ b/xonsh/built_ins.py @@ -858,7 +858,7 @@ def subproc_captured_inject(*cmds): or shlex.split(). """ s = run_subproc(cmds, captured='stdout') - toks = builtins.__xonsh_execer__.parser.lexer.split(s) + toks = builtins.__xonsh_execer__.parser.lexer.split(s.strip()) return toks
learningequality__kolibri-219
AttributeError: `Notetype` object has no attribute `MethodFilter`
## Summary
I generated `kolibri-static` via `python setup.py bdist_wheel --static`, installed it on my `Windows 7 VM` using pip, then ran `kolibri manage runserver` and got the error shown in the screenshot below.
## Branch or installer
- Version: `master`
## Screenshots
![screen shot 2016-07-09 at 2 34 43 am](https://cloud.githubusercontent.com/assets/4099119/16698854/182d0d34-4583-11e6-9848-62ea7d2019a1.png)
## How to reproduce
1. `cd ~/kolibri` on your working machine
2. run `python setup.py bdist_wheel --static`
3. install the generated `kolibri-static` wheel in your Windows 7 VirtualBox VM
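The accompanying patch (see the diff further below) resolves this by declaring `django-filter` as an explicit dependency so it gets bundled into the static build. A minimal sketch of the resulting `install_requires` block in `setup.py`, mirroring the patched file shown below:

```python
# setup.py (sketch): declare django-filter so the static build bundles it
install_requires = [
    'colorlog',
    'django>=1.9,<1.10',
    'django-filter>=0.13.0',  # previously missing from the dependency list
    'django-mptt==0.8.4',
    'django-js-reverse==0.7.2',
    'djangorestframework==3.3.3',
    'docopt',
    'six',
]
```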
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport logging\nimport os\nimport shutil\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.command.install_scripts import install_scripts\n\n# Notice that we dare do this during setup.py -- this enforces a special\n# restraint on module initialization, namely that it shouldn't do anything\n# that depends on an installed environment.\nimport kolibri\nfrom kolibri import dist as kolibri_dist\n\n\ndef read_file(fname):\n \"\"\"\n Read file and decode in py2k\n \"\"\"\n if sys.version_info < (3,):\n return open(fname).read().decode(\"utf-8\")\n return open(fname).read()\n\ndist_name = 'kolibri'\n\nreadme = read_file('README.rst')\ndoclink = \"\"\"\nDocumentation\n-------------\n\nThe full documentation is at `http://kolibri.rtfd.org <http://kolibri.rtfd.org>`_.\"\"\"\n\n# Default description of the distributed package\ndescription = (\n \"\"\"Kolibri education platform for offline environments\"\"\"\n)\n\n# Decide if the invoked command is a request to do building\nis_building_dist = any(\n [x in sys.argv for x in (\n \"bdist\",\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_deb\",\n \"sdist_dsc\"\n )]\n)\n\nstatic_requirements = []\nstatic_dir = os.path.dirname(os.path.realpath(kolibri_dist.__file__))\n\ninstall_requires = [\n 'colorlog',\n 'django>=1.9,<1.10',\n 'django-mptt==0.8.4',\n 'django-js-reverse==0.7.2',\n 'djangorestframework==3.3.3',\n 'docopt',\n 'six',\n]\n\n# Check if user supplied the special '--static' option\nif '--static' in sys.argv:\n sys.argv.remove('--static')\n dist_name = 'kolibri-static'\n description += \" This static version bundles all dependencies.\"\n install_requires, static_requirements = [], install_requires\n static_build = True\n\n\n################\n# Windows code #\n################\n#\n# Close your eyes\n\nBAT_TEMPLATE = \\\n r\"\"\"@echo off\nset mypath=%~dp0\nset pyscript=\"%mypath%{FNAME}\"\nset /p line1=<%pyscript%\nif \"%line1:~0,2%\" == \"#!\" (goto :goodstart)\necho First line of %pyscript% does not start with \"#!\"\nexit /b 1\n:goodstart\nset py_exe=%line1:~2%\ncall %py_exe% %pyscript% %*\n\"\"\"\n\n\nclass bat_install_scripts(install_scripts):\n \"\"\"\n Automatically creates .bat scripts for each executable distributed\n \"\"\"\n\n def run(self):\n install_scripts.run(self)\n if not os.name == \"nt\":\n return\n for filepath in self.get_outputs():\n # If we can find an executable name in the #! 
top line of the script\n # file, make .bat wrapper for script.\n with open(filepath, 'rt') as fobj:\n first_line = fobj.readline()\n if not (first_line.startswith('#!') and\n 'python' in first_line.lower()):\n continue\n pth, fname = os.path.split(filepath)\n froot, ___ = os.path.splitext(fname)\n bat_file = os.path.join(pth, froot + '.bat')\n bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname)\n if self.dry_run:\n continue\n with open(bat_file, 'wt') as fobj:\n fobj.write(bat_contents)\n\n\n# You can open your eyes again\n#\n#####################\n# END: Windows code #\n#####################\n\n\n######################################\n# STATIC AND DYNAMIC BUILD SPECIFICS #\n######################################\n\ndef enable_log_to_stdout(logname):\n \"\"\"Given a log name, outputs > INFO to stdout.\"\"\"\n log = logging.getLogger(logname)\n log.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # add formatter to ch\n ch.setFormatter(formatter)\n # add ch to logger\n log.addHandler(ch)\n\n\n# If it's a static build, we invoke pip to bundle dependencies in python-packages\n# This would be the case for commands \"bdist\" and \"sdist\"\nif static_requirements and is_building_dist:\n\n sys.stderr.write(\n \"This is a static build... invoking pip to put static dependencies in \"\n \"dist-packages/\\n\\n\"\n \"Requirements:\\n\\n\" + \"\\n\".join(static_requirements)\n )\n\n current_dir = os.path.dirname(os.path.realpath(__file__))\n static_cache_dir = os.path.join(current_dir, 'dist-packages-cache')\n static_temp_dir = os.path.join(current_dir, 'dist-packages-temp')\n\n # Create directory where dynamically created dependencies are put\n if not os.path.exists(static_cache_dir):\n os.mkdir(static_cache_dir)\n\n # Should remove the temporary directory always\n if os.path.exists(static_temp_dir):\n sys.stderr.write(\"Removing previous temporary sources for pip {}\".format(static_temp_dir))\n shutil.rmtree(static_temp_dir)\n\n # Install from pip\n\n # Code modified from this example:\n # http://threebean.org/blog/2011/06/06/installing-from-pip-inside-python-or-a-simple-pip-api/\n import pip.commands.install\n\n # Ensure we get output from pip\n enable_log_to_stdout('pip.commands.install')\n\n def install_distributions(distributions):\n command = pip.commands.install.InstallCommand()\n opts, ___ = command.parser.parse_args([])\n opts.target_dir = static_dir\n opts.build_dir = static_temp_dir\n opts.download_cache = static_cache_dir\n opts.isolated = True\n opts.compile = False\n opts.ignore_dependencies = True\n # opts.use_wheel = False\n opts.no_clean = False\n command.run(opts, distributions)\n # requirement_set.source_dir = STATIC_DIST_PACKAGES_TEMP\n # requirement_set.install(opts)\n\n install_distributions(static_requirements)\n\nelif is_building_dist:\n\n if len(os.listdir(static_dir)) > 3:\n raise RuntimeError(\n \"Please empty {} - make clean!\".format(\n static_dir\n )\n )\n\n\nsetup(\n name=dist_name,\n version=kolibri.__version__,\n description=description,\n long_description=\"{readme}\\n\\n{doclink}\".format(\n readme=readme,\n doclink=doclink\n ),\n author='Learning Equality',\n author_email='[email protected]',\n url='https://github.com/learningequality/kolibri',\n packages=[\n str('kolibri'), # https://github.com/pypa/setuptools/pull/597\n ],\n entry_points={\n 'console_scripts': [\n 'kolibri = kolibri.utils.cli:main'\n ]\n },\n 
package_dir={'kolibri': 'kolibri'},\n include_package_data=True,\n install_requires=install_requires,\n setup_requires=['pytest-runner'],\n tests_require=['pytest', 'tox', 'flake8'],\n license='MIT',\n zip_safe=False,\n keywords='kolibri',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n cmdclass={\n 'install_scripts': bat_install_scripts # Windows bat wrapper\n }\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport logging\nimport os\nimport shutil\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.command.install_scripts import install_scripts\n\n# Notice that we dare do this during setup.py -- this enforces a special\n# restraint on module initialization, namely that it shouldn't do anything\n# that depends on an installed environment.\nimport kolibri\nfrom kolibri import dist as kolibri_dist\n\n\ndef read_file(fname):\n \"\"\"\n Read file and decode in py2k\n \"\"\"\n if sys.version_info < (3,):\n return open(fname).read().decode(\"utf-8\")\n return open(fname).read()\n\ndist_name = 'kolibri'\n\nreadme = read_file('README.rst')\ndoclink = \"\"\"\nDocumentation\n-------------\n\nThe full documentation is at `http://kolibri.rtfd.org <http://kolibri.rtfd.org>`_.\"\"\"\n\n# Default description of the distributed package\ndescription = (\n \"\"\"Kolibri education platform for offline environments\"\"\"\n)\n\n# Decide if the invoked command is a request to do building\nis_building_dist = any(\n [x in sys.argv for x in (\n \"bdist\",\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_deb\",\n \"sdist_dsc\"\n )]\n)\n\nstatic_requirements = []\nstatic_dir = os.path.dirname(os.path.realpath(kolibri_dist.__file__))\n\ninstall_requires = [\n 'colorlog',\n 'django>=1.9,<1.10',\n 'django-filter>=0.13.0',\n 'django-mptt==0.8.4',\n 'django-js-reverse==0.7.2',\n 'djangorestframework==3.3.3',\n 'docopt',\n 'six',\n]\n\n# Check if user supplied the special '--static' option\nif '--static' in sys.argv:\n sys.argv.remove('--static')\n dist_name = 'kolibri-static'\n description += \" This static version bundles all dependencies.\"\n install_requires, static_requirements = [], install_requires\n static_build = True\n\n\n################\n# Windows code #\n################\n#\n# Close your eyes\n\nBAT_TEMPLATE = \\\n r\"\"\"@echo off\nset mypath=%~dp0\nset pyscript=\"%mypath%{FNAME}\"\nset /p line1=<%pyscript%\nif \"%line1:~0,2%\" == \"#!\" (goto :goodstart)\necho First line of %pyscript% does not start with \"#!\"\nexit /b 1\n:goodstart\nset py_exe=%line1:~2%\ncall %py_exe% %pyscript% %*\n\"\"\"\n\n\nclass bat_install_scripts(install_scripts):\n \"\"\"\n Automatically creates .bat scripts for each executable distributed\n \"\"\"\n\n def run(self):\n install_scripts.run(self)\n if not os.name == \"nt\":\n return\n for filepath in self.get_outputs():\n # If we can find an executable name in the #! 
top line of the script\n # file, make .bat wrapper for script.\n with open(filepath, 'rt') as fobj:\n first_line = fobj.readline()\n if not (first_line.startswith('#!') and\n 'python' in first_line.lower()):\n continue\n pth, fname = os.path.split(filepath)\n froot, ___ = os.path.splitext(fname)\n bat_file = os.path.join(pth, froot + '.bat')\n bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname)\n if self.dry_run:\n continue\n with open(bat_file, 'wt') as fobj:\n fobj.write(bat_contents)\n\n\n# You can open your eyes again\n#\n#####################\n# END: Windows code #\n#####################\n\n\n######################################\n# STATIC AND DYNAMIC BUILD SPECIFICS #\n######################################\n\ndef enable_log_to_stdout(logname):\n \"\"\"Given a log name, outputs > INFO to stdout.\"\"\"\n log = logging.getLogger(logname)\n log.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # add formatter to ch\n ch.setFormatter(formatter)\n # add ch to logger\n log.addHandler(ch)\n\n\n# If it's a static build, we invoke pip to bundle dependencies in python-packages\n# This would be the case for commands \"bdist\" and \"sdist\"\nif static_requirements and is_building_dist:\n\n sys.stderr.write(\n \"This is a static build... invoking pip to put static dependencies in \"\n \"dist-packages/\\n\\n\"\n \"Requirements:\\n\\n\" + \"\\n\".join(static_requirements)\n )\n\n current_dir = os.path.dirname(os.path.realpath(__file__))\n static_cache_dir = os.path.join(current_dir, 'dist-packages-cache')\n static_temp_dir = os.path.join(current_dir, 'dist-packages-temp')\n\n # Create directory where dynamically created dependencies are put\n if not os.path.exists(static_cache_dir):\n os.mkdir(static_cache_dir)\n\n # Should remove the temporary directory always\n if os.path.exists(static_temp_dir):\n sys.stderr.write(\"Removing previous temporary sources for pip {}\".format(static_temp_dir))\n shutil.rmtree(static_temp_dir)\n\n # Install from pip\n\n # Code modified from this example:\n # http://threebean.org/blog/2011/06/06/installing-from-pip-inside-python-or-a-simple-pip-api/\n import pip.commands.install\n\n # Ensure we get output from pip\n enable_log_to_stdout('pip.commands.install')\n\n def install_distributions(distributions):\n command = pip.commands.install.InstallCommand()\n opts, ___ = command.parser.parse_args([])\n opts.target_dir = static_dir\n opts.build_dir = static_temp_dir\n opts.download_cache = static_cache_dir\n opts.isolated = True\n opts.compile = False\n opts.ignore_dependencies = True\n # opts.use_wheel = False\n opts.no_clean = False\n command.run(opts, distributions)\n # requirement_set.source_dir = STATIC_DIST_PACKAGES_TEMP\n # requirement_set.install(opts)\n\n install_distributions(static_requirements)\n\nelif is_building_dist:\n\n if len(os.listdir(static_dir)) > 3:\n raise RuntimeError(\n \"Please empty {} - make clean!\".format(\n static_dir\n )\n )\n\n\nsetup(\n name=dist_name,\n version=kolibri.__version__,\n description=description,\n long_description=\"{readme}\\n\\n{doclink}\".format(\n readme=readme,\n doclink=doclink\n ),\n author='Learning Equality',\n author_email='[email protected]',\n url='https://github.com/learningequality/kolibri',\n packages=[\n str('kolibri'), # https://github.com/pypa/setuptools/pull/597\n ],\n entry_points={\n 'console_scripts': [\n 'kolibri = kolibri.utils.cli:main'\n ]\n },\n 
package_dir={'kolibri': 'kolibri'},\n include_package_data=True,\n install_requires=install_requires,\n setup_requires=['pytest-runner'],\n tests_require=['pytest', 'tox', 'flake8'],\n license='MIT',\n zip_safe=False,\n keywords='kolibri',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n cmdclass={\n 'install_scripts': bat_install_scripts # Windows bat wrapper\n }\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 34ffc239c20..e3c73f6d1bf 100644 --- a/setup.py +++ b/setup.py @@ -56,6 +56,7 @@ def read_file(fname): install_requires = [ 'colorlog', 'django>=1.9,<1.10', + 'django-filter>=0.13.0', 'django-mptt==0.8.4', 'django-js-reverse==0.7.2', 'djangorestframework==3.3.3',
ipython__ipython-10668
No module named sphinx.util.compat error using `IPython.sphinxext` with Sphinx pre-1.7 master branch
Trying out `IPython.sphinxext` 6.1.0 with ``Sphinx==1.7.dev20170617``, I get the following error on a Sphinx project:

```
Extension error:
Could not import extension IPython.sphinxext.ipython_directive (exception: No module named 'sphinx.util.compat')
```

Indeed, `sphinx.util.compat` was deprecated in Sphinx 1.6 and will be [removed in Sphinx 1.7](https://github.com/sphinx-doc/sphinx/blob/3d5e0c5d7/CHANGES#L45).

To reproduce, in a virtual env with IPython 6.1.0 run

pip install git+https://github.com/sphinx-doc/sphinx

and then

```
mkdir /tmp/IPython-test
cd /tmp/IPython-test
yes "n" | sphinx-quickstart --dot _ --project IPython-test --author "Example" -v 0 --release 0 --language en --suffix .rst --master index --makefile --batchfile .
echo '
extensions = [
    "IPython.sphinxext.ipython_directive",
]
' >> conf.py
make html
```
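For reference, `Directive` has long been importable from `docutils.parsers.rst`, so the extension does not actually need the deprecated Sphinx re-export. A minimal compatibility sketch (not necessarily the exact change shipped by IPython) would be:

```python
# Prefer the docutils base class; fall back to the deprecated Sphinx
# re-export only on installations too old to provide it from docutils.
try:
    from docutils.parsers.rst import Directive
except ImportError:
    from sphinx.util.compat import Directive
```

This keeps `IPython.sphinxext.ipython_directive` importable on Sphinx 1.7+, where `sphinx.util.compat` no longer exists.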
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nSphinx directive to support embedded IPython code.\n\nThis directive allows pasting of entire interactive IPython sessions, prompts\nand all, and their code will actually get re-executed at doc build time, with\nall prompts renumbered sequentially. It also allows you to input code as a pure\npython input by giving the argument python to the directive. The output looks\nlike an interactive ipython section.\n\nTo enable this directive, simply list it in your Sphinx ``conf.py`` file\n(making sure the directory where you placed it is visible to sphinx, as is\nneeded for all Sphinx directives). For example, to enable syntax highlighting\nand the IPython directive::\n\n extensions = ['IPython.sphinxext.ipython_console_highlighting',\n 'IPython.sphinxext.ipython_directive']\n\nThe IPython directive outputs code-blocks with the language 'ipython'. So\nif you do not have the syntax highlighting extension enabled as well, then\nall rendered code-blocks will be uncolored. By default this directive assumes\nthat your prompts are unchanged IPython ones, but this can be customized.\nThe configurable options that can be placed in conf.py are:\n\nipython_savefig_dir:\n The directory in which to save the figures. This is relative to the\n Sphinx source directory. The default is `html_static_path`.\nipython_rgxin:\n The compiled regular expression to denote the start of IPython input\n lines. The default is re.compile('In \\[(\\d+)\\]:\\s?(.*)\\s*'). You\n shouldn't need to change this.\nipython_rgxout:\n The compiled regular expression to denote the start of IPython output\n lines. The default is re.compile('Out\\[(\\d+)\\]:\\s?(.*)\\s*'). You\n shouldn't need to change this.\nipython_promptin:\n The string to represent the IPython input prompt in the generated ReST.\n The default is 'In [%d]:'. This expects that the line numbers are used\n in the prompt.\nipython_promptout:\n The string to represent the IPython prompt in the generated ReST. The\n default is 'Out [%d]:'. This expects that the line numbers are used\n in the prompt.\nipython_mplbackend:\n The string which specifies if the embedded Sphinx shell should import\n Matplotlib and set the backend. The value specifies a backend that is\n passed to `matplotlib.use()` before any lines in `ipython_execlines` are\n executed. If not specified in conf.py, then the default value of 'agg' is\n used. To use the IPython directive without matplotlib as a dependency, set\n the value to `None`. It may end up that matplotlib is still imported\n if the user specifies so in `ipython_execlines` or makes use of the\n @savefig pseudo decorator.\nipython_execlines:\n A list of strings to be exec'd in the embedded Sphinx shell. Typical\n usage is to make certain packages always available. Set this to an empty\n list if you wish to have no imports always available. If specified in\n conf.py as `None`, then it has the effect of making no imports available.\n If omitted from conf.py altogether, then the default value of\n ['import numpy as np', 'import matplotlib.pyplot as plt'] is used.\nipython_holdcount\n When the @suppress pseudo-decorator is used, the execution count can be\n incremented or not. The default behavior is to hold the execution count,\n corresponding to a value of `True`. 
Set this to `False` to increment\n the execution count after each suppressed command.\n\nAs an example, to use the IPython directive when `matplotlib` is not available,\none sets the backend to `None`::\n\n ipython_mplbackend = None\n\nAn example usage of the directive is:\n\n.. code-block:: rst\n\n .. ipython::\n\n In [1]: x = 1\n\n In [2]: y = x**2\n\n In [3]: print(y)\n\nSee http://matplotlib.org/sampledoc/ipython_directive.html for additional\ndocumentation.\n\nPseudo-Decorators\n=================\n\nNote: Only one decorator is supported per input. If more than one decorator\nis specified, then only the last one is used.\n\nIn addition to the Pseudo-Decorators/options described at the above link,\nseveral enhancements have been made. The directive will emit a message to the\nconsole at build-time if code-execution resulted in an exception or warning.\nYou can suppress these on a per-block basis by specifying the :okexcept:\nor :okwarning: options:\n\n.. code-block:: rst\n\n .. ipython::\n :okexcept:\n :okwarning:\n\n In [1]: 1/0\n In [2]: # raise warning.\n\nToDo\n----\n\n- Turn the ad-hoc test() function into a real test suite.\n- Break up ipython-specific functionality from matplotlib stuff into better\n separated code.\n\nAuthors\n-------\n\n- John D Hunter: orignal author.\n- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.\n- VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.\n- Skipper Seabold, refactoring, cleanups, pure python addition\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Stdlib\nimport atexit\nimport errno\nimport os\nimport re\nimport sys\nimport tempfile\nimport ast\nimport warnings\nimport shutil\nfrom io import StringIO\n\n# Third-party\nfrom docutils.parsers.rst import directives\nfrom sphinx.util.compat import Directive\n\n# Our own\nfrom traitlets.config import Config\nfrom IPython import InteractiveShell\nfrom IPython.core.profiledir import ProfileDir\n\n#-----------------------------------------------------------------------------\n# Globals\n#-----------------------------------------------------------------------------\n# for tokenizing blocks\nCOMMENT, INPUT, OUTPUT = range(3)\n\n#-----------------------------------------------------------------------------\n# Functions and class declarations\n#-----------------------------------------------------------------------------\n\ndef block_parser(part, rgxin, rgxout, fmtin, fmtout):\n \"\"\"\n part is a string of ipython text, comprised of at most one\n input, one output, comments, and blank lines. 
The block parser\n parses the text into a list of::\n\n blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]\n\n where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and\n data is, depending on the type of token::\n\n COMMENT : the comment string\n\n INPUT: the (DECORATOR, INPUT_LINE, REST) where\n DECORATOR: the input decorator (or None)\n INPUT_LINE: the input as string (possibly multi-line)\n REST : any stdout generated by the input line (not OUTPUT)\n\n OUTPUT: the output string, possibly multi-line\n\n \"\"\"\n block = []\n lines = part.split('\\n')\n N = len(lines)\n i = 0\n decorator = None\n while 1:\n\n if i==N:\n # nothing left to parse -- the last line\n break\n\n line = lines[i]\n i += 1\n line_stripped = line.strip()\n if line_stripped.startswith('#'):\n block.append((COMMENT, line))\n continue\n\n if line_stripped.startswith('@'):\n # Here is where we assume there is, at most, one decorator.\n # Might need to rethink this.\n decorator = line_stripped\n continue\n\n # does this look like an input line?\n matchin = rgxin.match(line)\n if matchin:\n lineno, inputline = int(matchin.group(1)), matchin.group(2)\n\n # the ....: continuation string\n continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))\n Nc = len(continuation)\n # input lines can continue on for more than one line, if\n # we have a '\\' line continuation char or a function call\n # echo line 'print'. The input line can only be\n # terminated by the end of the block or an output line, so\n # we parse out the rest of the input line if it is\n # multiline as well as any echo text\n\n rest = []\n while i<N:\n\n # look ahead; if the next line is blank, or a comment, or\n # an output line, we're done\n\n nextline = lines[i]\n matchout = rgxout.match(nextline)\n #print \"nextline=%s, continuation=%s, starts=%s\"%(nextline, continuation, nextline.startswith(continuation))\n if matchout or nextline.startswith('#'):\n break\n elif nextline.startswith(continuation):\n # The default ipython_rgx* treat the space following the colon as optional.\n # However, If the space is there we must consume it or code\n # employing the cython_magic extension will fail to execute.\n #\n # This works with the default ipython_rgx* patterns,\n # If you modify them, YMMV.\n nextline = nextline[Nc:]\n if nextline and nextline[0] == ' ':\n nextline = nextline[1:]\n\n inputline += '\\n' + nextline\n else:\n rest.append(nextline)\n i+= 1\n\n block.append((INPUT, (decorator, inputline, '\\n'.join(rest))))\n continue\n\n # if it looks like an output line grab all the text to the end\n # of the block\n matchout = rgxout.match(line)\n if matchout:\n lineno, output = int(matchout.group(1)), matchout.group(2)\n if i<N-1:\n output = '\\n'.join([output] + lines[i:])\n\n block.append((OUTPUT, output))\n break\n\n return block\n\n\nclass EmbeddedSphinxShell(object):\n \"\"\"An embedded IPython instance to run inside Sphinx\"\"\"\n\n def __init__(self, exec_lines=None):\n\n self.cout = StringIO()\n\n if exec_lines is None:\n exec_lines = []\n\n # Create config object for IPython\n config = Config()\n config.HistoryManager.hist_file = ':memory:'\n config.InteractiveShell.autocall = False\n config.InteractiveShell.autoindent = False\n config.InteractiveShell.colors = 'NoColor'\n\n # create a profile so instance history isn't saved\n tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')\n profname = 'auto_profile_sphinx_build'\n pdir = os.path.join(tmp_profile_dir,profname)\n profile = ProfileDir.create_profile_dir(pdir)\n\n # Create and initialize global ipython, 
but don't start its mainloop.\n # This will persist across different EmbededSphinxShell instances.\n IP = InteractiveShell.instance(config=config, profile_dir=profile)\n atexit.register(self.cleanup)\n\n sys.stdout = self.cout\n sys.stderr = self.cout\n\n # For debugging, so we can see normal output, use this:\n #from IPython.utils.io import Tee\n #sys.stdout = Tee(self.cout, channel='stdout') # dbg\n #sys.stderr = Tee(self.cout, channel='stderr') # dbg\n\n # Store a few parts of IPython we'll need.\n self.IP = IP\n self.user_ns = self.IP.user_ns\n self.user_global_ns = self.IP.user_global_ns\n\n self.input = ''\n self.output = ''\n self.tmp_profile_dir = tmp_profile_dir\n\n self.is_verbatim = False\n self.is_doctest = False\n self.is_suppress = False\n\n # Optionally, provide more detailed information to shell.\n # this is assigned by the SetUp method of IPythonDirective\n # to point at itself.\n #\n # So, you can access handy things at self.directive.state\n self.directive = None\n\n # on the first call to the savefig decorator, we'll import\n # pyplot as plt so we can make a call to the plt.gcf().savefig\n self._pyplot_imported = False\n\n # Prepopulate the namespace.\n for line in exec_lines:\n self.process_input_line(line, store_history=False)\n\n def cleanup(self):\n shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)\n\n def clear_cout(self):\n self.cout.seek(0)\n self.cout.truncate(0)\n\n def process_input_line(self, line, store_history=True):\n \"\"\"process the input, capturing stdout\"\"\"\n\n stdout = sys.stdout\n splitter = self.IP.input_splitter\n try:\n sys.stdout = self.cout\n splitter.push(line)\n more = splitter.push_accepts_more()\n if not more:\n source_raw = splitter.raw_reset()\n self.IP.run_cell(source_raw, store_history=store_history)\n finally:\n sys.stdout = stdout\n\n def process_image(self, decorator):\n \"\"\"\n # build out an image directive like\n # .. image:: somefile.png\n # :width 4in\n #\n # from an input like\n # savefig somefile.png width=4in\n \"\"\"\n savefig_dir = self.savefig_dir\n source_dir = self.source_dir\n saveargs = decorator.split(' ')\n filename = saveargs[1]\n # insert relative path to image file in source (as absolute path for Sphinx)\n outfile = '/' + os.path.relpath(os.path.join(savefig_dir,filename),\n source_dir)\n\n imagerows = ['.. 
image:: %s'%outfile]\n\n for kwarg in saveargs[2:]:\n arg, val = kwarg.split('=')\n arg = arg.strip()\n val = val.strip()\n imagerows.append(' :%s: %s'%(arg, val))\n\n image_file = os.path.basename(outfile) # only return file name\n image_directive = '\\n'.join(imagerows)\n return image_file, image_directive\n\n # Callbacks for each type of token\n def process_input(self, data, input_prompt, lineno):\n \"\"\"\n Process data block for INPUT token.\n\n \"\"\"\n decorator, input, rest = data\n image_file = None\n image_directive = None\n\n is_verbatim = decorator=='@verbatim' or self.is_verbatim\n is_doctest = (decorator is not None and \\\n decorator.startswith('@doctest')) or self.is_doctest\n is_suppress = decorator=='@suppress' or self.is_suppress\n is_okexcept = decorator=='@okexcept' or self.is_okexcept\n is_okwarning = decorator=='@okwarning' or self.is_okwarning\n is_savefig = decorator is not None and \\\n decorator.startswith('@savefig')\n\n input_lines = input.split('\\n')\n if len(input_lines) > 1:\n if input_lines[-1] != \"\":\n input_lines.append('') # make sure there's a blank line\n # so splitter buffer gets reset\n\n continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))\n\n if is_savefig:\n image_file, image_directive = self.process_image(decorator)\n\n ret = []\n is_semicolon = False\n\n # Hold the execution count, if requested to do so.\n if is_suppress and self.hold_count:\n store_history = False\n else:\n store_history = True\n\n # Note: catch_warnings is not thread safe\n with warnings.catch_warnings(record=True) as ws:\n for i, line in enumerate(input_lines):\n if line.endswith(';'):\n is_semicolon = True\n\n if i == 0:\n # process the first input line\n if is_verbatim:\n self.process_input_line('')\n self.IP.execution_count += 1 # increment it anyway\n else:\n # only submit the line in non-verbatim mode\n self.process_input_line(line, store_history=store_history)\n formatted_line = '%s %s'%(input_prompt, line)\n else:\n # process a continuation line\n if not is_verbatim:\n self.process_input_line(line, store_history=store_history)\n\n formatted_line = '%s %s'%(continuation, line)\n\n if not is_suppress:\n ret.append(formatted_line)\n\n if not is_suppress and len(rest.strip()) and is_verbatim:\n # The \"rest\" is the standard output of the input. This needs to be\n # added when in verbatim mode. If there is no \"rest\", then we don't\n # add it, as the new line will be added by the processed output.\n ret.append(rest)\n\n # Fetch the processed output. (This is not the submitted output.)\n self.cout.seek(0)\n processed_output = self.cout.read()\n if not is_suppress and not is_semicolon:\n #\n # In IPythonDirective.run, the elements of `ret` are eventually\n # combined such that '' entries correspond to newlines. So if\n # `processed_output` is equal to '', then the adding it to `ret`\n # ensures that there is a blank line between consecutive inputs\n # that have no outputs, as in:\n #\n # In [1]: x = 4\n #\n # In [2]: x = 5\n #\n # When there is processed output, it has a '\\n' at the tail end. So\n # adding the output to `ret` will provide the necessary spacing\n # between consecutive input/output blocks, as in:\n #\n # In [1]: x\n # Out[1]: 5\n #\n # In [2]: x\n # Out[2]: 5\n #\n # When there is stdout from the input, it also has a '\\n' at the\n # tail end, and so this ensures proper spacing as well. E.g.:\n #\n # In [1]: print x\n # 5\n #\n # In [2]: x = 5\n #\n # When in verbatim mode, `processed_output` is empty (because\n # nothing was passed to IP. 
Sometimes the submitted code block has\n # an Out[] portion and sometimes it does not. When it does not, we\n # need to ensure proper spacing, so we have to add '' to `ret`.\n # However, if there is an Out[] in the submitted code, then we do\n # not want to add a newline as `process_output` has stuff to add.\n # The difficulty is that `process_input` doesn't know if\n # `process_output` will be called---so it doesn't know if there is\n # Out[] in the code block. The requires that we include a hack in\n # `process_block`. See the comments there.\n #\n ret.append(processed_output)\n elif is_semicolon:\n # Make sure there is a newline after the semicolon.\n ret.append('')\n\n # context information\n filename = \"Unknown\"\n lineno = 0\n if self.directive.state:\n filename = self.directive.state.document.current_source\n lineno = self.directive.state.document.current_line\n\n # output any exceptions raised during execution to stdout\n # unless :okexcept: has been specified.\n if not is_okexcept and \"Traceback\" in processed_output:\n s = \"\\nException in %s at block ending on line %s\\n\" % (filename, lineno)\n s += \"Specify :okexcept: as an option in the ipython:: block to suppress this message\\n\"\n sys.stdout.write('\\n\\n>>>' + ('-' * 73))\n sys.stdout.write(s)\n sys.stdout.write(processed_output)\n sys.stdout.write('<<<' + ('-' * 73) + '\\n\\n')\n\n # output any warning raised during execution to stdout\n # unless :okwarning: has been specified.\n if not is_okwarning:\n for w in ws:\n s = \"\\nWarning in %s at block ending on line %s\\n\" % (filename, lineno)\n s += \"Specify :okwarning: as an option in the ipython:: block to suppress this message\\n\"\n sys.stdout.write('\\n\\n>>>' + ('-' * 73))\n sys.stdout.write(s)\n sys.stdout.write(('-' * 76) + '\\n')\n s=warnings.formatwarning(w.message, w.category,\n w.filename, w.lineno, w.line)\n sys.stdout.write(s)\n sys.stdout.write('<<<' + ('-' * 73) + '\\n')\n\n self.cout.truncate(0)\n\n return (ret, input_lines, processed_output,\n is_doctest, decorator, image_file, image_directive)\n\n\n def process_output(self, data, output_prompt, input_lines, output,\n is_doctest, decorator, image_file):\n \"\"\"\n Process data block for OUTPUT token.\n\n \"\"\"\n # Recall: `data` is the submitted output, and `output` is the processed\n # output from `input_lines`.\n\n TAB = ' ' * 4\n\n if is_doctest and output is not None:\n\n found = output # This is the processed output\n found = found.strip()\n submitted = data.strip()\n\n if self.directive is None:\n source = 'Unavailable'\n content = 'Unavailable'\n else:\n source = self.directive.state.document.current_source\n content = self.directive.content\n # Add tabs and join into a single string.\n content = '\\n'.join([TAB + line for line in content])\n\n # Make sure the output contains the output prompt.\n ind = found.find(output_prompt)\n if ind < 0:\n e = ('output does not contain output prompt\\n\\n'\n 'Document source: {0}\\n\\n'\n 'Raw content: \\n{1}\\n\\n'\n 'Input line(s):\\n{TAB}{2}\\n\\n'\n 'Output line(s):\\n{TAB}{3}\\n\\n')\n e = e.format(source, content, '\\n'.join(input_lines),\n repr(found), TAB=TAB)\n raise RuntimeError(e)\n found = found[len(output_prompt):].strip()\n\n # Handle the actual doctest comparison.\n if decorator.strip() == '@doctest':\n # Standard doctest\n if found != submitted:\n e = ('doctest failure\\n\\n'\n 'Document source: {0}\\n\\n'\n 'Raw content: \\n{1}\\n\\n'\n 'On input line(s):\\n{TAB}{2}\\n\\n'\n 'we found output:\\n{TAB}{3}\\n\\n'\n 'instead of the 
expected:\\n{TAB}{4}\\n\\n')\n e = e.format(source, content, '\\n'.join(input_lines),\n repr(found), repr(submitted), TAB=TAB)\n raise RuntimeError(e)\n else:\n self.custom_doctest(decorator, input_lines, found, submitted)\n\n # When in verbatim mode, this holds additional submitted output\n # to be written in the final Sphinx output.\n # https://github.com/ipython/ipython/issues/5776\n out_data = []\n\n is_verbatim = decorator=='@verbatim' or self.is_verbatim\n if is_verbatim and data.strip():\n # Note that `ret` in `process_block` has '' as its last element if\n # the code block was in verbatim mode. So if there is no submitted\n # output, then we will have proper spacing only if we do not add\n # an additional '' to `out_data`. This is why we condition on\n # `and data.strip()`.\n\n # The submitted output has no output prompt. If we want the\n # prompt and the code to appear, we need to join them now\n # instead of adding them separately---as this would create an\n # undesired newline. How we do this ultimately depends on the\n # format of the output regex. I'll do what works for the default\n # prompt for now, and we might have to adjust if it doesn't work\n # in other cases. Finally, the submitted output does not have\n # a trailing newline, so we must add it manually.\n out_data.append(\"{0} {1}\\n\".format(output_prompt, data))\n\n return out_data\n\n def process_comment(self, data):\n \"\"\"Process data fPblock for COMMENT token.\"\"\"\n if not self.is_suppress:\n return [data]\n\n def save_image(self, image_file):\n \"\"\"\n Saves the image file to disk.\n \"\"\"\n self.ensure_pyplot()\n command = 'plt.gcf().savefig(\"%s\")'%image_file\n #print 'SAVEFIG', command # dbg\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\n self.process_input_line('cd -b ipy_savedir', store_history=False)\n self.process_input_line(command, store_history=False)\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\n self.clear_cout()\n\n def process_block(self, block):\n \"\"\"\n process block from the block_parser and return a list of processed lines\n \"\"\"\n ret = []\n output = None\n input_lines = None\n lineno = self.IP.execution_count\n\n input_prompt = self.promptin % lineno\n output_prompt = self.promptout % lineno\n image_file = None\n image_directive = None\n\n found_input = False\n for token, data in block:\n if token == COMMENT:\n out_data = self.process_comment(data)\n elif token == INPUT:\n found_input = True\n (out_data, input_lines, output, is_doctest,\n decorator, image_file, image_directive) = \\\n self.process_input(data, input_prompt, lineno)\n elif token == OUTPUT:\n if not found_input:\n\n TAB = ' ' * 4\n linenumber = 0\n source = 'Unavailable'\n content = 'Unavailable'\n if self.directive:\n linenumber = self.directive.state.document.current_line\n source = self.directive.state.document.current_source\n content = self.directive.content\n # Add tabs and join into a single string.\n content = '\\n'.join([TAB + line for line in content])\n\n e = ('\\n\\nInvalid block: Block contains an output prompt '\n 'without an input prompt.\\n\\n'\n 'Document source: {0}\\n\\n'\n 'Content begins at line {1}: \\n\\n{2}\\n\\n'\n 'Problematic block within content: \\n\\n{TAB}{3}\\n\\n')\n e = e.format(source, linenumber, content, block, TAB=TAB)\n\n # Write, rather than include in exception, since Sphinx\n # will truncate tracebacks.\n sys.stdout.write(e)\n raise RuntimeError('An invalid 
block was detected.')\n\n out_data = \\\n self.process_output(data, output_prompt, input_lines,\n output, is_doctest, decorator,\n image_file)\n if out_data:\n # Then there was user submitted output in verbatim mode.\n # We need to remove the last element of `ret` that was\n # added in `process_input`, as it is '' and would introduce\n # an undesirable newline.\n assert(ret[-1] == '')\n del ret[-1]\n\n if out_data:\n ret.extend(out_data)\n\n # save the image files\n if image_file is not None:\n self.save_image(image_file)\n\n return ret, image_directive\n\n def ensure_pyplot(self):\n \"\"\"\n Ensures that pyplot has been imported into the embedded IPython shell.\n\n Also, makes sure to set the backend appropriately if not set already.\n\n \"\"\"\n # We are here if the @figure pseudo decorator was used. Thus, it's\n # possible that we could be here even if python_mplbackend were set to\n # `None`. That's also strange and perhaps worthy of raising an\n # exception, but for now, we just set the backend to 'agg'.\n\n if not self._pyplot_imported:\n if 'matplotlib.backends' not in sys.modules:\n # Then ipython_matplotlib was set to None but there was a\n # call to the @figure decorator (and ipython_execlines did\n # not set a backend).\n #raise Exception(\"No backend was set, but @figure was used!\")\n import matplotlib\n matplotlib.use('agg')\n\n # Always import pyplot into embedded shell.\n self.process_input_line('import matplotlib.pyplot as plt',\n store_history=False)\n self._pyplot_imported = True\n\n def process_pure_python(self, content):\n \"\"\"\n content is a list of strings. it is unedited directive content\n\n This runs it line by line in the InteractiveShell, prepends\n prompts as needed capturing stderr and stdout, then returns\n the content as a list as if it were ipython code\n \"\"\"\n output = []\n savefig = False # keep up with this to clear figure\n multiline = False # to handle line continuation\n multiline_start = None\n fmtin = self.promptin\n\n ct = 0\n\n for lineno, line in enumerate(content):\n\n line_stripped = line.strip()\n if not len(line):\n output.append(line)\n continue\n\n # handle decorators\n if line_stripped.startswith('@'):\n output.extend([line])\n if 'savefig' in line:\n savefig = True # and need to clear figure\n continue\n\n # handle comments\n if line_stripped.startswith('#'):\n output.extend([line])\n continue\n\n # deal with lines checking for multiline\n continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))\n if not multiline:\n modified = u\"%s %s\" % (fmtin % ct, line_stripped)\n output.append(modified)\n ct += 1\n try:\n ast.parse(line_stripped)\n output.append(u'')\n except Exception: # on a multiline\n multiline = True\n multiline_start = lineno\n else: # still on a multiline\n modified = u'%s %s' % (continuation, line)\n output.append(modified)\n\n # if the next line is indented, it should be part of multiline\n if len(content) > lineno + 1:\n nextline = content[lineno + 1]\n if len(nextline) - len(nextline.lstrip()) > 3:\n continue\n try:\n mod = ast.parse(\n '\\n'.join(content[multiline_start:lineno+1]))\n if isinstance(mod.body[0], ast.FunctionDef):\n # check to see if we have the whole function\n for element in mod.body[0].body:\n if isinstance(element, ast.Return):\n multiline = False\n else:\n output.append(u'')\n multiline = False\n except Exception:\n pass\n\n if savefig: # clear figure if plotted\n self.ensure_pyplot()\n self.process_input_line('plt.clf()', store_history=False)\n self.clear_cout()\n savefig = False\n\n return 
output\n\n def custom_doctest(self, decorator, input_lines, found, submitted):\n \"\"\"\n Perform a specialized doctest.\n\n \"\"\"\n from .custom_doctests import doctests\n\n args = decorator.split()\n doctest_type = args[1]\n if doctest_type in doctests:\n doctests[doctest_type](self, args, input_lines, found, submitted)\n else:\n e = \"Invalid option to @doctest: {0}\".format(doctest_type)\n raise Exception(e)\n\n\nclass IPythonDirective(Directive):\n\n has_content = True\n required_arguments = 0\n optional_arguments = 4 # python, suppress, verbatim, doctest\n final_argumuent_whitespace = True\n option_spec = { 'python': directives.unchanged,\n 'suppress' : directives.flag,\n 'verbatim' : directives.flag,\n 'doctest' : directives.flag,\n 'okexcept': directives.flag,\n 'okwarning': directives.flag\n }\n\n shell = None\n\n seen_docs = set()\n\n def get_config_options(self):\n # contains sphinx configuration variables\n config = self.state.document.settings.env.config\n\n # get config variables to set figure output directory\n savefig_dir = config.ipython_savefig_dir\n source_dir = self.state.document.settings.env.srcdir\n savefig_dir = os.path.join(source_dir, savefig_dir)\n\n # get regex and prompt stuff\n rgxin = config.ipython_rgxin\n rgxout = config.ipython_rgxout\n promptin = config.ipython_promptin\n promptout = config.ipython_promptout\n mplbackend = config.ipython_mplbackend\n exec_lines = config.ipython_execlines\n hold_count = config.ipython_holdcount\n\n return (savefig_dir, source_dir, rgxin, rgxout,\n promptin, promptout, mplbackend, exec_lines, hold_count)\n\n def setup(self):\n # Get configuration values.\n (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,\n mplbackend, exec_lines, hold_count) = self.get_config_options()\n\n try:\n os.makedirs(savefig_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n if self.shell is None:\n # We will be here many times. 
However, when the\n # EmbeddedSphinxShell is created, its interactive shell member\n # is the same for each instance.\n\n if mplbackend and 'matplotlib.backends' not in sys.modules:\n import matplotlib\n matplotlib.use(mplbackend)\n\n # Must be called after (potentially) importing matplotlib and\n # setting its backend since exec_lines might import pylab.\n self.shell = EmbeddedSphinxShell(exec_lines)\n\n # Store IPython directive to enable better error messages\n self.shell.directive = self\n\n # reset the execution count if we haven't processed this doc\n #NOTE: this may be borked if there are multiple seen_doc tmp files\n #check time stamp?\n if not self.state.document.current_source in self.seen_docs:\n self.shell.IP.history_manager.reset()\n self.shell.IP.execution_count = 1\n self.seen_docs.add(self.state.document.current_source)\n\n # and attach to shell so we don't have to pass them around\n self.shell.rgxin = rgxin\n self.shell.rgxout = rgxout\n self.shell.promptin = promptin\n self.shell.promptout = promptout\n self.shell.savefig_dir = savefig_dir\n self.shell.source_dir = source_dir\n self.shell.hold_count = hold_count\n\n # setup bookmark for saving figures directory\n self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,\n store_history=False)\n self.shell.clear_cout()\n\n return rgxin, rgxout, promptin, promptout\n\n def teardown(self):\n # delete last bookmark\n self.shell.process_input_line('bookmark -d ipy_savedir',\n store_history=False)\n self.shell.clear_cout()\n\n def run(self):\n debug = False\n\n #TODO, any reason block_parser can't be a method of embeddable shell\n # then we wouldn't have to carry these around\n rgxin, rgxout, promptin, promptout = self.setup()\n\n options = self.options\n self.shell.is_suppress = 'suppress' in options\n self.shell.is_doctest = 'doctest' in options\n self.shell.is_verbatim = 'verbatim' in options\n self.shell.is_okexcept = 'okexcept' in options\n self.shell.is_okwarning = 'okwarning' in options\n\n # handle pure python code\n if 'python' in self.arguments:\n content = self.content\n self.content = self.shell.process_pure_python(content)\n\n # parts consists of all text within the ipython-block.\n # Each part is an input/output block.\n parts = '\\n'.join(self.content).split('\\n\\n')\n\n lines = ['.. code-block:: ipython', '']\n figures = []\n\n for part in parts:\n block = block_parser(part, rgxin, rgxout, promptin, promptout)\n if len(block):\n rows, figure = self.shell.process_block(block)\n for row in rows:\n lines.extend([' {0}'.format(line)\n for line in row.split('\\n')])\n\n if figure is not None:\n figures.append(figure)\n\n for figure in figures:\n lines.append('')\n lines.extend(figure.split('\\n'))\n lines.append('')\n\n if len(lines) > 2:\n if debug:\n print('\\n'.join(lines))\n else:\n # This has to do with input, not output. 
But if we comment\n # these lines out, then no IPython code will appear in the\n # final output.\n self.state_machine.insert_input(\n lines, self.state_machine.input_lines.source(0))\n\n # cleanup\n self.teardown()\n\n return []\n\n# Enable as a proper Sphinx directive\ndef setup(app):\n setup.app = app\n\n app.add_directive('ipython', IPythonDirective)\n app.add_config_value('ipython_savefig_dir', 'savefig', 'env')\n app.add_config_value('ipython_rgxin',\n re.compile('In \\[(\\d+)\\]:\\s?(.*)\\s*'), 'env')\n app.add_config_value('ipython_rgxout',\n re.compile('Out\\[(\\d+)\\]:\\s?(.*)\\s*'), 'env')\n app.add_config_value('ipython_promptin', 'In [%d]:', 'env')\n app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')\n\n # We could just let matplotlib pick whatever is specified as the default\n # backend in the matplotlibrc file, but this would cause issues if the\n # backend didn't work in headless environments. For this reason, 'agg'\n # is a good default backend choice.\n app.add_config_value('ipython_mplbackend', 'agg', 'env')\n\n # If the user sets this config value to `None`, then EmbeddedSphinxShell's\n # __init__ method will treat it as [].\n execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']\n app.add_config_value('ipython_execlines', execlines, 'env')\n\n app.add_config_value('ipython_holdcount', True, 'env')\n\n metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}\n return metadata\n\n# Simple smoke test, needs to be converted to a proper automatic test.\ndef test():\n\n examples = [\n r\"\"\"\nIn [9]: pwd\nOut[9]: '/home/jdhunter/py4science/book'\n\nIn [10]: cd bookdata/\n/home/jdhunter/py4science/book/bookdata\n\nIn [2]: from pylab import *\n\nIn [2]: ion()\n\nIn [3]: im = imread('stinkbug.png')\n\n@savefig mystinkbug.png width=4in\nIn [4]: imshow(im)\nOut[4]: <matplotlib.image.AxesImage object at 0x39ea850>\n\n\"\"\",\n r\"\"\"\n\nIn [1]: x = 'hello world'\n\n# string methods can be\n# used to alter the string\n@doctest\nIn [2]: x.upper()\nOut[2]: 'HELLO WORLD'\n\n@verbatim\nIn [3]: x.st<TAB>\nx.startswith x.strip\n\"\"\",\n r\"\"\"\n\nIn [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\\\n .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'\n\nIn [131]: print url.split('&')\n['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']\n\nIn [60]: import urllib\n\n\"\"\",\n r\"\"\"\\\n\nIn [133]: import numpy.random\n\n@suppress\nIn [134]: numpy.random.seed(2358)\n\n@doctest\nIn [135]: numpy.random.rand(10,2)\nOut[135]:\narray([[ 0.64524308, 0.59943846],\n [ 0.47102322, 0.8715456 ],\n [ 0.29370834, 0.74776844],\n [ 0.99539577, 0.1313423 ],\n [ 0.16250302, 0.21103583],\n [ 0.81626524, 0.1312433 ],\n [ 0.67338089, 0.72302393],\n [ 0.7566368 , 0.07033696],\n [ 0.22591016, 0.77731835],\n [ 0.0072729 , 0.34273127]])\n\n\"\"\",\n\n r\"\"\"\nIn [106]: print x\njdh\n\nIn [109]: for i in range(10):\n .....: print i\n .....:\n .....:\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n\"\"\",\n\n r\"\"\"\n\nIn [144]: from pylab import *\n\nIn [145]: ion()\n\n# use a semicolon to suppress the output\n@savefig test_hist.png width=4in\nIn [151]: hist(np.random.randn(10000), 100);\n\n\n@savefig test_plot.png width=4in\nIn [151]: plot(np.random.randn(10000), 'o');\n \"\"\",\n\n r\"\"\"\n# use a semicolon to suppress the output\nIn [151]: plt.clf()\n\n@savefig plot_simple.png width=4in\nIn [151]: plot([1,2,3])\n\n@savefig hist_simple.png width=4in\nIn [151]: hist(np.random.randn(10000), 
100);\n\n\"\"\",\n r\"\"\"\n# update the current fig\nIn [151]: ylabel('number')\n\nIn [152]: title('normal distribution')\n\n\n@savefig hist_with_text.png\nIn [153]: grid(True)\n\n@doctest float\nIn [154]: 0.1 + 0.2\nOut[154]: 0.3\n\n@doctest float\nIn [155]: np.arange(16).reshape(4,4)\nOut[155]:\narray([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\nIn [1]: x = np.arange(16, dtype=float).reshape(4,4)\n\nIn [2]: x[0,0] = np.inf\n\nIn [3]: x[0,1] = np.nan\n\n@doctest float\nIn [4]: x\nOut[4]:\narray([[ inf, nan, 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n\n\n \"\"\",\n ]\n # skip local-file depending first example:\n examples = examples[1:]\n\n #ipython_directive.DEBUG = True # dbg\n #options = dict(suppress=True) # dbg\n options = {}\n for example in examples:\n content = example.split('\\n')\n IPythonDirective('debug', arguments=None, options=options,\n content=content, lineno=0,\n content_offset=None, block_text=None,\n state=None, state_machine=None,\n )\n\n# Run test suite as a script\nif __name__=='__main__':\n if not os.path.isdir('_static'):\n os.mkdir('_static')\n test()\n print('All OK? Check figures in _static/')\n", "path": "IPython/sphinxext/ipython_directive.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nSphinx directive to support embedded IPython code.\n\nThis directive allows pasting of entire interactive IPython sessions, prompts\nand all, and their code will actually get re-executed at doc build time, with\nall prompts renumbered sequentially. It also allows you to input code as a pure\npython input by giving the argument python to the directive. The output looks\nlike an interactive ipython section.\n\nTo enable this directive, simply list it in your Sphinx ``conf.py`` file\n(making sure the directory where you placed it is visible to sphinx, as is\nneeded for all Sphinx directives). For example, to enable syntax highlighting\nand the IPython directive::\n\n extensions = ['IPython.sphinxext.ipython_console_highlighting',\n 'IPython.sphinxext.ipython_directive']\n\nThe IPython directive outputs code-blocks with the language 'ipython'. So\nif you do not have the syntax highlighting extension enabled as well, then\nall rendered code-blocks will be uncolored. By default this directive assumes\nthat your prompts are unchanged IPython ones, but this can be customized.\nThe configurable options that can be placed in conf.py are:\n\nipython_savefig_dir:\n The directory in which to save the figures. This is relative to the\n Sphinx source directory. The default is `html_static_path`.\nipython_rgxin:\n The compiled regular expression to denote the start of IPython input\n lines. The default is re.compile('In \\[(\\d+)\\]:\\s?(.*)\\s*'). You\n shouldn't need to change this.\nipython_rgxout:\n The compiled regular expression to denote the start of IPython output\n lines. The default is re.compile('Out\\[(\\d+)\\]:\\s?(.*)\\s*'). You\n shouldn't need to change this.\nipython_promptin:\n The string to represent the IPython input prompt in the generated ReST.\n The default is 'In [%d]:'. This expects that the line numbers are used\n in the prompt.\nipython_promptout:\n The string to represent the IPython prompt in the generated ReST. The\n default is 'Out [%d]:'. This expects that the line numbers are used\n in the prompt.\nipython_mplbackend:\n The string which specifies if the embedded Sphinx shell should import\n Matplotlib and set the backend. The value specifies a backend that is\n passed to `matplotlib.use()` before any lines in `ipython_execlines` are\n executed. If not specified in conf.py, then the default value of 'agg' is\n used. To use the IPython directive without matplotlib as a dependency, set\n the value to `None`. It may end up that matplotlib is still imported\n if the user specifies so in `ipython_execlines` or makes use of the\n @savefig pseudo decorator.\nipython_execlines:\n A list of strings to be exec'd in the embedded Sphinx shell. Typical\n usage is to make certain packages always available. Set this to an empty\n list if you wish to have no imports always available. If specified in\n conf.py as `None`, then it has the effect of making no imports available.\n If omitted from conf.py altogether, then the default value of\n ['import numpy as np', 'import matplotlib.pyplot as plt'] is used.\nipython_holdcount\n When the @suppress pseudo-decorator is used, the execution count can be\n incremented or not. The default behavior is to hold the execution count,\n corresponding to a value of `True`. 
Set this to `False` to increment\n the execution count after each suppressed command.\n\nAs an example, to use the IPython directive when `matplotlib` is not available,\none sets the backend to `None`::\n\n ipython_mplbackend = None\n\nAn example usage of the directive is:\n\n.. code-block:: rst\n\n .. ipython::\n\n In [1]: x = 1\n\n In [2]: y = x**2\n\n In [3]: print(y)\n\nSee http://matplotlib.org/sampledoc/ipython_directive.html for additional\ndocumentation.\n\nPseudo-Decorators\n=================\n\nNote: Only one decorator is supported per input. If more than one decorator\nis specified, then only the last one is used.\n\nIn addition to the Pseudo-Decorators/options described at the above link,\nseveral enhancements have been made. The directive will emit a message to the\nconsole at build-time if code-execution resulted in an exception or warning.\nYou can suppress these on a per-block basis by specifying the :okexcept:\nor :okwarning: options:\n\n.. code-block:: rst\n\n .. ipython::\n :okexcept:\n :okwarning:\n\n In [1]: 1/0\n In [2]: # raise warning.\n\nToDo\n----\n\n- Turn the ad-hoc test() function into a real test suite.\n- Break up ipython-specific functionality from matplotlib stuff into better\n separated code.\n\nAuthors\n-------\n\n- John D Hunter: orignal author.\n- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.\n- VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.\n- Skipper Seabold, refactoring, cleanups, pure python addition\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Stdlib\nimport atexit\nimport errno\nimport os\nimport re\nimport sys\nimport tempfile\nimport ast\nimport warnings\nimport shutil\nfrom io import StringIO\n\n# Third-party\nfrom docutils.parsers.rst import directives\nfrom docutils.parsers.rst import Directive\n\n# Our own\nfrom traitlets.config import Config\nfrom IPython import InteractiveShell\nfrom IPython.core.profiledir import ProfileDir\n\n#-----------------------------------------------------------------------------\n# Globals\n#-----------------------------------------------------------------------------\n# for tokenizing blocks\nCOMMENT, INPUT, OUTPUT = range(3)\n\n#-----------------------------------------------------------------------------\n# Functions and class declarations\n#-----------------------------------------------------------------------------\n\ndef block_parser(part, rgxin, rgxout, fmtin, fmtout):\n \"\"\"\n part is a string of ipython text, comprised of at most one\n input, one output, comments, and blank lines. 
The block parser\n parses the text into a list of::\n\n blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]\n\n where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and\n data is, depending on the type of token::\n\n COMMENT : the comment string\n\n INPUT: the (DECORATOR, INPUT_LINE, REST) where\n DECORATOR: the input decorator (or None)\n INPUT_LINE: the input as string (possibly multi-line)\n REST : any stdout generated by the input line (not OUTPUT)\n\n OUTPUT: the output string, possibly multi-line\n\n \"\"\"\n block = []\n lines = part.split('\\n')\n N = len(lines)\n i = 0\n decorator = None\n while 1:\n\n if i==N:\n # nothing left to parse -- the last line\n break\n\n line = lines[i]\n i += 1\n line_stripped = line.strip()\n if line_stripped.startswith('#'):\n block.append((COMMENT, line))\n continue\n\n if line_stripped.startswith('@'):\n # Here is where we assume there is, at most, one decorator.\n # Might need to rethink this.\n decorator = line_stripped\n continue\n\n # does this look like an input line?\n matchin = rgxin.match(line)\n if matchin:\n lineno, inputline = int(matchin.group(1)), matchin.group(2)\n\n # the ....: continuation string\n continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))\n Nc = len(continuation)\n # input lines can continue on for more than one line, if\n # we have a '\\' line continuation char or a function call\n # echo line 'print'. The input line can only be\n # terminated by the end of the block or an output line, so\n # we parse out the rest of the input line if it is\n # multiline as well as any echo text\n\n rest = []\n while i<N:\n\n # look ahead; if the next line is blank, or a comment, or\n # an output line, we're done\n\n nextline = lines[i]\n matchout = rgxout.match(nextline)\n #print \"nextline=%s, continuation=%s, starts=%s\"%(nextline, continuation, nextline.startswith(continuation))\n if matchout or nextline.startswith('#'):\n break\n elif nextline.startswith(continuation):\n # The default ipython_rgx* treat the space following the colon as optional.\n # However, If the space is there we must consume it or code\n # employing the cython_magic extension will fail to execute.\n #\n # This works with the default ipython_rgx* patterns,\n # If you modify them, YMMV.\n nextline = nextline[Nc:]\n if nextline and nextline[0] == ' ':\n nextline = nextline[1:]\n\n inputline += '\\n' + nextline\n else:\n rest.append(nextline)\n i+= 1\n\n block.append((INPUT, (decorator, inputline, '\\n'.join(rest))))\n continue\n\n # if it looks like an output line grab all the text to the end\n # of the block\n matchout = rgxout.match(line)\n if matchout:\n lineno, output = int(matchout.group(1)), matchout.group(2)\n if i<N-1:\n output = '\\n'.join([output] + lines[i:])\n\n block.append((OUTPUT, output))\n break\n\n return block\n\n\nclass EmbeddedSphinxShell(object):\n \"\"\"An embedded IPython instance to run inside Sphinx\"\"\"\n\n def __init__(self, exec_lines=None):\n\n self.cout = StringIO()\n\n if exec_lines is None:\n exec_lines = []\n\n # Create config object for IPython\n config = Config()\n config.HistoryManager.hist_file = ':memory:'\n config.InteractiveShell.autocall = False\n config.InteractiveShell.autoindent = False\n config.InteractiveShell.colors = 'NoColor'\n\n # create a profile so instance history isn't saved\n tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')\n profname = 'auto_profile_sphinx_build'\n pdir = os.path.join(tmp_profile_dir,profname)\n profile = ProfileDir.create_profile_dir(pdir)\n\n # Create and initialize global ipython, 
but don't start its mainloop.\n # This will persist across different EmbededSphinxShell instances.\n IP = InteractiveShell.instance(config=config, profile_dir=profile)\n atexit.register(self.cleanup)\n\n sys.stdout = self.cout\n sys.stderr = self.cout\n\n # For debugging, so we can see normal output, use this:\n #from IPython.utils.io import Tee\n #sys.stdout = Tee(self.cout, channel='stdout') # dbg\n #sys.stderr = Tee(self.cout, channel='stderr') # dbg\n\n # Store a few parts of IPython we'll need.\n self.IP = IP\n self.user_ns = self.IP.user_ns\n self.user_global_ns = self.IP.user_global_ns\n\n self.input = ''\n self.output = ''\n self.tmp_profile_dir = tmp_profile_dir\n\n self.is_verbatim = False\n self.is_doctest = False\n self.is_suppress = False\n\n # Optionally, provide more detailed information to shell.\n # this is assigned by the SetUp method of IPythonDirective\n # to point at itself.\n #\n # So, you can access handy things at self.directive.state\n self.directive = None\n\n # on the first call to the savefig decorator, we'll import\n # pyplot as plt so we can make a call to the plt.gcf().savefig\n self._pyplot_imported = False\n\n # Prepopulate the namespace.\n for line in exec_lines:\n self.process_input_line(line, store_history=False)\n\n def cleanup(self):\n shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)\n\n def clear_cout(self):\n self.cout.seek(0)\n self.cout.truncate(0)\n\n def process_input_line(self, line, store_history=True):\n \"\"\"process the input, capturing stdout\"\"\"\n\n stdout = sys.stdout\n splitter = self.IP.input_splitter\n try:\n sys.stdout = self.cout\n splitter.push(line)\n more = splitter.push_accepts_more()\n if not more:\n source_raw = splitter.raw_reset()\n self.IP.run_cell(source_raw, store_history=store_history)\n finally:\n sys.stdout = stdout\n\n def process_image(self, decorator):\n \"\"\"\n # build out an image directive like\n # .. image:: somefile.png\n # :width 4in\n #\n # from an input like\n # savefig somefile.png width=4in\n \"\"\"\n savefig_dir = self.savefig_dir\n source_dir = self.source_dir\n saveargs = decorator.split(' ')\n filename = saveargs[1]\n # insert relative path to image file in source (as absolute path for Sphinx)\n outfile = '/' + os.path.relpath(os.path.join(savefig_dir,filename),\n source_dir)\n\n imagerows = ['.. 
image:: %s'%outfile]\n\n for kwarg in saveargs[2:]:\n arg, val = kwarg.split('=')\n arg = arg.strip()\n val = val.strip()\n imagerows.append(' :%s: %s'%(arg, val))\n\n image_file = os.path.basename(outfile) # only return file name\n image_directive = '\\n'.join(imagerows)\n return image_file, image_directive\n\n # Callbacks for each type of token\n def process_input(self, data, input_prompt, lineno):\n \"\"\"\n Process data block for INPUT token.\n\n \"\"\"\n decorator, input, rest = data\n image_file = None\n image_directive = None\n\n is_verbatim = decorator=='@verbatim' or self.is_verbatim\n is_doctest = (decorator is not None and \\\n decorator.startswith('@doctest')) or self.is_doctest\n is_suppress = decorator=='@suppress' or self.is_suppress\n is_okexcept = decorator=='@okexcept' or self.is_okexcept\n is_okwarning = decorator=='@okwarning' or self.is_okwarning\n is_savefig = decorator is not None and \\\n decorator.startswith('@savefig')\n\n input_lines = input.split('\\n')\n if len(input_lines) > 1:\n if input_lines[-1] != \"\":\n input_lines.append('') # make sure there's a blank line\n # so splitter buffer gets reset\n\n continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))\n\n if is_savefig:\n image_file, image_directive = self.process_image(decorator)\n\n ret = []\n is_semicolon = False\n\n # Hold the execution count, if requested to do so.\n if is_suppress and self.hold_count:\n store_history = False\n else:\n store_history = True\n\n # Note: catch_warnings is not thread safe\n with warnings.catch_warnings(record=True) as ws:\n for i, line in enumerate(input_lines):\n if line.endswith(';'):\n is_semicolon = True\n\n if i == 0:\n # process the first input line\n if is_verbatim:\n self.process_input_line('')\n self.IP.execution_count += 1 # increment it anyway\n else:\n # only submit the line in non-verbatim mode\n self.process_input_line(line, store_history=store_history)\n formatted_line = '%s %s'%(input_prompt, line)\n else:\n # process a continuation line\n if not is_verbatim:\n self.process_input_line(line, store_history=store_history)\n\n formatted_line = '%s %s'%(continuation, line)\n\n if not is_suppress:\n ret.append(formatted_line)\n\n if not is_suppress and len(rest.strip()) and is_verbatim:\n # The \"rest\" is the standard output of the input. This needs to be\n # added when in verbatim mode. If there is no \"rest\", then we don't\n # add it, as the new line will be added by the processed output.\n ret.append(rest)\n\n # Fetch the processed output. (This is not the submitted output.)\n self.cout.seek(0)\n processed_output = self.cout.read()\n if not is_suppress and not is_semicolon:\n #\n # In IPythonDirective.run, the elements of `ret` are eventually\n # combined such that '' entries correspond to newlines. So if\n # `processed_output` is equal to '', then the adding it to `ret`\n # ensures that there is a blank line between consecutive inputs\n # that have no outputs, as in:\n #\n # In [1]: x = 4\n #\n # In [2]: x = 5\n #\n # When there is processed output, it has a '\\n' at the tail end. So\n # adding the output to `ret` will provide the necessary spacing\n # between consecutive input/output blocks, as in:\n #\n # In [1]: x\n # Out[1]: 5\n #\n # In [2]: x\n # Out[2]: 5\n #\n # When there is stdout from the input, it also has a '\\n' at the\n # tail end, and so this ensures proper spacing as well. E.g.:\n #\n # In [1]: print x\n # 5\n #\n # In [2]: x = 5\n #\n # When in verbatim mode, `processed_output` is empty (because\n # nothing was passed to IP. 
Sometimes the submitted code block has\n # an Out[] portion and sometimes it does not. When it does not, we\n # need to ensure proper spacing, so we have to add '' to `ret`.\n # However, if there is an Out[] in the submitted code, then we do\n # not want to add a newline as `process_output` has stuff to add.\n # The difficulty is that `process_input` doesn't know if\n # `process_output` will be called---so it doesn't know if there is\n # Out[] in the code block. The requires that we include a hack in\n # `process_block`. See the comments there.\n #\n ret.append(processed_output)\n elif is_semicolon:\n # Make sure there is a newline after the semicolon.\n ret.append('')\n\n # context information\n filename = \"Unknown\"\n lineno = 0\n if self.directive.state:\n filename = self.directive.state.document.current_source\n lineno = self.directive.state.document.current_line\n\n # output any exceptions raised during execution to stdout\n # unless :okexcept: has been specified.\n if not is_okexcept and \"Traceback\" in processed_output:\n s = \"\\nException in %s at block ending on line %s\\n\" % (filename, lineno)\n s += \"Specify :okexcept: as an option in the ipython:: block to suppress this message\\n\"\n sys.stdout.write('\\n\\n>>>' + ('-' * 73))\n sys.stdout.write(s)\n sys.stdout.write(processed_output)\n sys.stdout.write('<<<' + ('-' * 73) + '\\n\\n')\n\n # output any warning raised during execution to stdout\n # unless :okwarning: has been specified.\n if not is_okwarning:\n for w in ws:\n s = \"\\nWarning in %s at block ending on line %s\\n\" % (filename, lineno)\n s += \"Specify :okwarning: as an option in the ipython:: block to suppress this message\\n\"\n sys.stdout.write('\\n\\n>>>' + ('-' * 73))\n sys.stdout.write(s)\n sys.stdout.write(('-' * 76) + '\\n')\n s=warnings.formatwarning(w.message, w.category,\n w.filename, w.lineno, w.line)\n sys.stdout.write(s)\n sys.stdout.write('<<<' + ('-' * 73) + '\\n')\n\n self.cout.truncate(0)\n\n return (ret, input_lines, processed_output,\n is_doctest, decorator, image_file, image_directive)\n\n\n def process_output(self, data, output_prompt, input_lines, output,\n is_doctest, decorator, image_file):\n \"\"\"\n Process data block for OUTPUT token.\n\n \"\"\"\n # Recall: `data` is the submitted output, and `output` is the processed\n # output from `input_lines`.\n\n TAB = ' ' * 4\n\n if is_doctest and output is not None:\n\n found = output # This is the processed output\n found = found.strip()\n submitted = data.strip()\n\n if self.directive is None:\n source = 'Unavailable'\n content = 'Unavailable'\n else:\n source = self.directive.state.document.current_source\n content = self.directive.content\n # Add tabs and join into a single string.\n content = '\\n'.join([TAB + line for line in content])\n\n # Make sure the output contains the output prompt.\n ind = found.find(output_prompt)\n if ind < 0:\n e = ('output does not contain output prompt\\n\\n'\n 'Document source: {0}\\n\\n'\n 'Raw content: \\n{1}\\n\\n'\n 'Input line(s):\\n{TAB}{2}\\n\\n'\n 'Output line(s):\\n{TAB}{3}\\n\\n')\n e = e.format(source, content, '\\n'.join(input_lines),\n repr(found), TAB=TAB)\n raise RuntimeError(e)\n found = found[len(output_prompt):].strip()\n\n # Handle the actual doctest comparison.\n if decorator.strip() == '@doctest':\n # Standard doctest\n if found != submitted:\n e = ('doctest failure\\n\\n'\n 'Document source: {0}\\n\\n'\n 'Raw content: \\n{1}\\n\\n'\n 'On input line(s):\\n{TAB}{2}\\n\\n'\n 'we found output:\\n{TAB}{3}\\n\\n'\n 'instead of the 
expected:\\n{TAB}{4}\\n\\n')\n e = e.format(source, content, '\\n'.join(input_lines),\n repr(found), repr(submitted), TAB=TAB)\n raise RuntimeError(e)\n else:\n self.custom_doctest(decorator, input_lines, found, submitted)\n\n # When in verbatim mode, this holds additional submitted output\n # to be written in the final Sphinx output.\n # https://github.com/ipython/ipython/issues/5776\n out_data = []\n\n is_verbatim = decorator=='@verbatim' or self.is_verbatim\n if is_verbatim and data.strip():\n # Note that `ret` in `process_block` has '' as its last element if\n # the code block was in verbatim mode. So if there is no submitted\n # output, then we will have proper spacing only if we do not add\n # an additional '' to `out_data`. This is why we condition on\n # `and data.strip()`.\n\n # The submitted output has no output prompt. If we want the\n # prompt and the code to appear, we need to join them now\n # instead of adding them separately---as this would create an\n # undesired newline. How we do this ultimately depends on the\n # format of the output regex. I'll do what works for the default\n # prompt for now, and we might have to adjust if it doesn't work\n # in other cases. Finally, the submitted output does not have\n # a trailing newline, so we must add it manually.\n out_data.append(\"{0} {1}\\n\".format(output_prompt, data))\n\n return out_data\n\n def process_comment(self, data):\n \"\"\"Process data fPblock for COMMENT token.\"\"\"\n if not self.is_suppress:\n return [data]\n\n def save_image(self, image_file):\n \"\"\"\n Saves the image file to disk.\n \"\"\"\n self.ensure_pyplot()\n command = 'plt.gcf().savefig(\"%s\")'%image_file\n #print 'SAVEFIG', command # dbg\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\n self.process_input_line('cd -b ipy_savedir', store_history=False)\n self.process_input_line(command, store_history=False)\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\n self.clear_cout()\n\n def process_block(self, block):\n \"\"\"\n process block from the block_parser and return a list of processed lines\n \"\"\"\n ret = []\n output = None\n input_lines = None\n lineno = self.IP.execution_count\n\n input_prompt = self.promptin % lineno\n output_prompt = self.promptout % lineno\n image_file = None\n image_directive = None\n\n found_input = False\n for token, data in block:\n if token == COMMENT:\n out_data = self.process_comment(data)\n elif token == INPUT:\n found_input = True\n (out_data, input_lines, output, is_doctest,\n decorator, image_file, image_directive) = \\\n self.process_input(data, input_prompt, lineno)\n elif token == OUTPUT:\n if not found_input:\n\n TAB = ' ' * 4\n linenumber = 0\n source = 'Unavailable'\n content = 'Unavailable'\n if self.directive:\n linenumber = self.directive.state.document.current_line\n source = self.directive.state.document.current_source\n content = self.directive.content\n # Add tabs and join into a single string.\n content = '\\n'.join([TAB + line for line in content])\n\n e = ('\\n\\nInvalid block: Block contains an output prompt '\n 'without an input prompt.\\n\\n'\n 'Document source: {0}\\n\\n'\n 'Content begins at line {1}: \\n\\n{2}\\n\\n'\n 'Problematic block within content: \\n\\n{TAB}{3}\\n\\n')\n e = e.format(source, linenumber, content, block, TAB=TAB)\n\n # Write, rather than include in exception, since Sphinx\n # will truncate tracebacks.\n sys.stdout.write(e)\n raise RuntimeError('An invalid 
block was detected.')\n\n out_data = \\\n self.process_output(data, output_prompt, input_lines,\n output, is_doctest, decorator,\n image_file)\n if out_data:\n # Then there was user submitted output in verbatim mode.\n # We need to remove the last element of `ret` that was\n # added in `process_input`, as it is '' and would introduce\n # an undesirable newline.\n assert(ret[-1] == '')\n del ret[-1]\n\n if out_data:\n ret.extend(out_data)\n\n # save the image files\n if image_file is not None:\n self.save_image(image_file)\n\n return ret, image_directive\n\n def ensure_pyplot(self):\n \"\"\"\n Ensures that pyplot has been imported into the embedded IPython shell.\n\n Also, makes sure to set the backend appropriately if not set already.\n\n \"\"\"\n # We are here if the @figure pseudo decorator was used. Thus, it's\n # possible that we could be here even if python_mplbackend were set to\n # `None`. That's also strange and perhaps worthy of raising an\n # exception, but for now, we just set the backend to 'agg'.\n\n if not self._pyplot_imported:\n if 'matplotlib.backends' not in sys.modules:\n # Then ipython_matplotlib was set to None but there was a\n # call to the @figure decorator (and ipython_execlines did\n # not set a backend).\n #raise Exception(\"No backend was set, but @figure was used!\")\n import matplotlib\n matplotlib.use('agg')\n\n # Always import pyplot into embedded shell.\n self.process_input_line('import matplotlib.pyplot as plt',\n store_history=False)\n self._pyplot_imported = True\n\n def process_pure_python(self, content):\n \"\"\"\n content is a list of strings. it is unedited directive content\n\n This runs it line by line in the InteractiveShell, prepends\n prompts as needed capturing stderr and stdout, then returns\n the content as a list as if it were ipython code\n \"\"\"\n output = []\n savefig = False # keep up with this to clear figure\n multiline = False # to handle line continuation\n multiline_start = None\n fmtin = self.promptin\n\n ct = 0\n\n for lineno, line in enumerate(content):\n\n line_stripped = line.strip()\n if not len(line):\n output.append(line)\n continue\n\n # handle decorators\n if line_stripped.startswith('@'):\n output.extend([line])\n if 'savefig' in line:\n savefig = True # and need to clear figure\n continue\n\n # handle comments\n if line_stripped.startswith('#'):\n output.extend([line])\n continue\n\n # deal with lines checking for multiline\n continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))\n if not multiline:\n modified = u\"%s %s\" % (fmtin % ct, line_stripped)\n output.append(modified)\n ct += 1\n try:\n ast.parse(line_stripped)\n output.append(u'')\n except Exception: # on a multiline\n multiline = True\n multiline_start = lineno\n else: # still on a multiline\n modified = u'%s %s' % (continuation, line)\n output.append(modified)\n\n # if the next line is indented, it should be part of multiline\n if len(content) > lineno + 1:\n nextline = content[lineno + 1]\n if len(nextline) - len(nextline.lstrip()) > 3:\n continue\n try:\n mod = ast.parse(\n '\\n'.join(content[multiline_start:lineno+1]))\n if isinstance(mod.body[0], ast.FunctionDef):\n # check to see if we have the whole function\n for element in mod.body[0].body:\n if isinstance(element, ast.Return):\n multiline = False\n else:\n output.append(u'')\n multiline = False\n except Exception:\n pass\n\n if savefig: # clear figure if plotted\n self.ensure_pyplot()\n self.process_input_line('plt.clf()', store_history=False)\n self.clear_cout()\n savefig = False\n\n return 
output\n\n def custom_doctest(self, decorator, input_lines, found, submitted):\n \"\"\"\n Perform a specialized doctest.\n\n \"\"\"\n from .custom_doctests import doctests\n\n args = decorator.split()\n doctest_type = args[1]\n if doctest_type in doctests:\n doctests[doctest_type](self, args, input_lines, found, submitted)\n else:\n e = \"Invalid option to @doctest: {0}\".format(doctest_type)\n raise Exception(e)\n\n\nclass IPythonDirective(Directive):\n\n has_content = True\n required_arguments = 0\n optional_arguments = 4 # python, suppress, verbatim, doctest\n final_argumuent_whitespace = True\n option_spec = { 'python': directives.unchanged,\n 'suppress' : directives.flag,\n 'verbatim' : directives.flag,\n 'doctest' : directives.flag,\n 'okexcept': directives.flag,\n 'okwarning': directives.flag\n }\n\n shell = None\n\n seen_docs = set()\n\n def get_config_options(self):\n # contains sphinx configuration variables\n config = self.state.document.settings.env.config\n\n # get config variables to set figure output directory\n savefig_dir = config.ipython_savefig_dir\n source_dir = self.state.document.settings.env.srcdir\n savefig_dir = os.path.join(source_dir, savefig_dir)\n\n # get regex and prompt stuff\n rgxin = config.ipython_rgxin\n rgxout = config.ipython_rgxout\n promptin = config.ipython_promptin\n promptout = config.ipython_promptout\n mplbackend = config.ipython_mplbackend\n exec_lines = config.ipython_execlines\n hold_count = config.ipython_holdcount\n\n return (savefig_dir, source_dir, rgxin, rgxout,\n promptin, promptout, mplbackend, exec_lines, hold_count)\n\n def setup(self):\n # Get configuration values.\n (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,\n mplbackend, exec_lines, hold_count) = self.get_config_options()\n\n try:\n os.makedirs(savefig_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n if self.shell is None:\n # We will be here many times. 
However, when the\n # EmbeddedSphinxShell is created, its interactive shell member\n # is the same for each instance.\n\n if mplbackend and 'matplotlib.backends' not in sys.modules:\n import matplotlib\n matplotlib.use(mplbackend)\n\n # Must be called after (potentially) importing matplotlib and\n # setting its backend since exec_lines might import pylab.\n self.shell = EmbeddedSphinxShell(exec_lines)\n\n # Store IPython directive to enable better error messages\n self.shell.directive = self\n\n # reset the execution count if we haven't processed this doc\n #NOTE: this may be borked if there are multiple seen_doc tmp files\n #check time stamp?\n if not self.state.document.current_source in self.seen_docs:\n self.shell.IP.history_manager.reset()\n self.shell.IP.execution_count = 1\n self.seen_docs.add(self.state.document.current_source)\n\n # and attach to shell so we don't have to pass them around\n self.shell.rgxin = rgxin\n self.shell.rgxout = rgxout\n self.shell.promptin = promptin\n self.shell.promptout = promptout\n self.shell.savefig_dir = savefig_dir\n self.shell.source_dir = source_dir\n self.shell.hold_count = hold_count\n\n # setup bookmark for saving figures directory\n self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,\n store_history=False)\n self.shell.clear_cout()\n\n return rgxin, rgxout, promptin, promptout\n\n def teardown(self):\n # delete last bookmark\n self.shell.process_input_line('bookmark -d ipy_savedir',\n store_history=False)\n self.shell.clear_cout()\n\n def run(self):\n debug = False\n\n #TODO, any reason block_parser can't be a method of embeddable shell\n # then we wouldn't have to carry these around\n rgxin, rgxout, promptin, promptout = self.setup()\n\n options = self.options\n self.shell.is_suppress = 'suppress' in options\n self.shell.is_doctest = 'doctest' in options\n self.shell.is_verbatim = 'verbatim' in options\n self.shell.is_okexcept = 'okexcept' in options\n self.shell.is_okwarning = 'okwarning' in options\n\n # handle pure python code\n if 'python' in self.arguments:\n content = self.content\n self.content = self.shell.process_pure_python(content)\n\n # parts consists of all text within the ipython-block.\n # Each part is an input/output block.\n parts = '\\n'.join(self.content).split('\\n\\n')\n\n lines = ['.. code-block:: ipython', '']\n figures = []\n\n for part in parts:\n block = block_parser(part, rgxin, rgxout, promptin, promptout)\n if len(block):\n rows, figure = self.shell.process_block(block)\n for row in rows:\n lines.extend([' {0}'.format(line)\n for line in row.split('\\n')])\n\n if figure is not None:\n figures.append(figure)\n\n for figure in figures:\n lines.append('')\n lines.extend(figure.split('\\n'))\n lines.append('')\n\n if len(lines) > 2:\n if debug:\n print('\\n'.join(lines))\n else:\n # This has to do with input, not output. 
But if we comment\n # these lines out, then no IPython code will appear in the\n # final output.\n self.state_machine.insert_input(\n lines, self.state_machine.input_lines.source(0))\n\n # cleanup\n self.teardown()\n\n return []\n\n# Enable as a proper Sphinx directive\ndef setup(app):\n setup.app = app\n\n app.add_directive('ipython', IPythonDirective)\n app.add_config_value('ipython_savefig_dir', 'savefig', 'env')\n app.add_config_value('ipython_rgxin',\n re.compile('In \\[(\\d+)\\]:\\s?(.*)\\s*'), 'env')\n app.add_config_value('ipython_rgxout',\n re.compile('Out\\[(\\d+)\\]:\\s?(.*)\\s*'), 'env')\n app.add_config_value('ipython_promptin', 'In [%d]:', 'env')\n app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')\n\n # We could just let matplotlib pick whatever is specified as the default\n # backend in the matplotlibrc file, but this would cause issues if the\n # backend didn't work in headless environments. For this reason, 'agg'\n # is a good default backend choice.\n app.add_config_value('ipython_mplbackend', 'agg', 'env')\n\n # If the user sets this config value to `None`, then EmbeddedSphinxShell's\n # __init__ method will treat it as [].\n execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']\n app.add_config_value('ipython_execlines', execlines, 'env')\n\n app.add_config_value('ipython_holdcount', True, 'env')\n\n metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}\n return metadata\n\n# Simple smoke test, needs to be converted to a proper automatic test.\ndef test():\n\n examples = [\n r\"\"\"\nIn [9]: pwd\nOut[9]: '/home/jdhunter/py4science/book'\n\nIn [10]: cd bookdata/\n/home/jdhunter/py4science/book/bookdata\n\nIn [2]: from pylab import *\n\nIn [2]: ion()\n\nIn [3]: im = imread('stinkbug.png')\n\n@savefig mystinkbug.png width=4in\nIn [4]: imshow(im)\nOut[4]: <matplotlib.image.AxesImage object at 0x39ea850>\n\n\"\"\",\n r\"\"\"\n\nIn [1]: x = 'hello world'\n\n# string methods can be\n# used to alter the string\n@doctest\nIn [2]: x.upper()\nOut[2]: 'HELLO WORLD'\n\n@verbatim\nIn [3]: x.st<TAB>\nx.startswith x.strip\n\"\"\",\n r\"\"\"\n\nIn [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\\\n .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'\n\nIn [131]: print url.split('&')\n['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']\n\nIn [60]: import urllib\n\n\"\"\",\n r\"\"\"\\\n\nIn [133]: import numpy.random\n\n@suppress\nIn [134]: numpy.random.seed(2358)\n\n@doctest\nIn [135]: numpy.random.rand(10,2)\nOut[135]:\narray([[ 0.64524308, 0.59943846],\n [ 0.47102322, 0.8715456 ],\n [ 0.29370834, 0.74776844],\n [ 0.99539577, 0.1313423 ],\n [ 0.16250302, 0.21103583],\n [ 0.81626524, 0.1312433 ],\n [ 0.67338089, 0.72302393],\n [ 0.7566368 , 0.07033696],\n [ 0.22591016, 0.77731835],\n [ 0.0072729 , 0.34273127]])\n\n\"\"\",\n\n r\"\"\"\nIn [106]: print x\njdh\n\nIn [109]: for i in range(10):\n .....: print i\n .....:\n .....:\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n\"\"\",\n\n r\"\"\"\n\nIn [144]: from pylab import *\n\nIn [145]: ion()\n\n# use a semicolon to suppress the output\n@savefig test_hist.png width=4in\nIn [151]: hist(np.random.randn(10000), 100);\n\n\n@savefig test_plot.png width=4in\nIn [151]: plot(np.random.randn(10000), 'o');\n \"\"\",\n\n r\"\"\"\n# use a semicolon to suppress the output\nIn [151]: plt.clf()\n\n@savefig plot_simple.png width=4in\nIn [151]: plot([1,2,3])\n\n@savefig hist_simple.png width=4in\nIn [151]: hist(np.random.randn(10000), 
100);\n\n\"\"\",\n r\"\"\"\n# update the current fig\nIn [151]: ylabel('number')\n\nIn [152]: title('normal distribution')\n\n\n@savefig hist_with_text.png\nIn [153]: grid(True)\n\n@doctest float\nIn [154]: 0.1 + 0.2\nOut[154]: 0.3\n\n@doctest float\nIn [155]: np.arange(16).reshape(4,4)\nOut[155]:\narray([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\nIn [1]: x = np.arange(16, dtype=float).reshape(4,4)\n\nIn [2]: x[0,0] = np.inf\n\nIn [3]: x[0,1] = np.nan\n\n@doctest float\nIn [4]: x\nOut[4]:\narray([[ inf, nan, 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n\n\n \"\"\",\n ]\n # skip local-file depending first example:\n examples = examples[1:]\n\n #ipython_directive.DEBUG = True # dbg\n #options = dict(suppress=True) # dbg\n options = {}\n for example in examples:\n content = example.split('\\n')\n IPythonDirective('debug', arguments=None, options=options,\n content=content, lineno=0,\n content_offset=None, block_text=None,\n state=None, state_machine=None,\n )\n\n# Run test suite as a script\nif __name__=='__main__':\n if not os.path.isdir('_static'):\n os.mkdir('_static')\n test()\n print('All OK? Check figures in _static/')\n", "path": "IPython/sphinxext/ipython_directive.py" } ]
diff --git a/IPython/sphinxext/ipython_directive.py b/IPython/sphinxext/ipython_directive.py index 82e98e2693f..dfa4a2ab8bf 100644 --- a/IPython/sphinxext/ipython_directive.py +++ b/IPython/sphinxext/ipython_directive.py @@ -138,7 +138,7 @@ # Third-party from docutils.parsers.rst import directives -from sphinx.util.compat import Directive +from docutils.parsers.rst import Directive # Our own from traitlets.config import Config
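The one-line functional change in the diff above swaps the deprecated `sphinx.util.compat.Directive` shim for the `docutils.parsers.rst.Directive` base class. A hedged sketch of a fallback import is shown below; it is hypothetical and not part of the patch, which switches to the docutils import unconditionally:

```python
# Hypothetical compatibility shim (not what the patch does): prefer the
# docutils base class and fall back to the old Sphinx re-export if needed.
try:
    from docutils.parsers.rst import Directive
except ImportError:
    from sphinx.util.compat import Directive
```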
docker__docker-py-1204
Issue with requests dependency I found that commit 95d9306d2a1fd22dffb12a0548abf2d2f744ed9d excludes requests 2.11 for a bug that is now fixed in requests 2.11.1. That exclusion gives me a version conflict with another module in my project: ``` pkg_resources.ContextualVersionConflict: (requests 2.11.1 (..............), Requirement.parse('requests<2.11,>=2.5.2'), {'docker-py'}) ``` Can we allow requests 2.11.1?
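The fix shown below simply drops the upper bound on `requests`. If the goal were instead to keep guarding against the one release with the known bug while still allowing 2.11.1, the constraint could be written as an exclusion; this is a hypothetical variant of the `setup.py` requirements list, not what the project shipped:

```python
# Hypothetical alternative: exclude only the known-bad release instead of
# removing the upper bound entirely.
requirements = [
    'requests >= 2.5.2, != 2.11.0',
    'six >= 1.4.0',
    'websocket-client >= 0.32.0',
    'docker-pycreds >= 0.2.1',
]
```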
[ { "content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2, < 2.11',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n 'docker-pycreds >= 0.2.1'\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',\n 'docker.types',\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n 'docker-pycreds >= 0.2.1'\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',\n 'docker.types',\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/requirements.txt b/requirements.txt index 1e5284600..375413122 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ -requests==2.5.3 +requests==2.11.1 six>=1.4.0 websocket-client==0.32.0 backports.ssl_match_hostname>=3.5 ; python_version < '3.5' ipaddress==1.0.16 ; python_version < '3.3' -docker-pycreds==0.2.1 \ No newline at end of file +docker-pycreds==0.2.1 diff --git a/setup.py b/setup.py index 9233ac2a8..bdcf3cd7b 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ SOURCE_DIR = os.path.join(ROOT_DIR) requirements = [ - 'requests >= 2.5.2, < 2.11', + 'requests >= 2.5.2', 'six >= 1.4.0', 'websocket-client >= 0.32.0', 'docker-pycreds >= 0.2.1'
meltano__meltano-8343
perf: Project's `.env` contents are evaluated too many times ### Meltano Version 3.2.0 ### Python Version NA ### Bug scope Configuration (settings parsing, validation, etc.) ### Operating System NA ### Description The `Project.dotenv_env` is evaluated many times per run, so a potential performance improvement would be to cache it: https://github.com/meltano/meltano/blob/19197396dee4145b039f70210a172ef72c1f464e/src/meltano/core/project.py#L375-L382 #### Justification See this flamegraph provided by @TyShkan: https://static.main.linendev.com/attachments/876cebd1-f3ad-485c-81a0-4b1cc971bc6a/profile.svg #### Measuring performance It would be nice to have https://github.com/meltano/meltano/issues/8341 in place before implementing this so we get a measure of the improvement but it's probably not a hard requirement. ### Code _No response_
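A minimal sketch of the caching approach the report suggests, assuming `functools.cached_property` (Python 3.8+); the class below is trimmed to the relevant members for illustration and is not the full `meltano.core.project.Project`:

```python
from functools import cached_property
from pathlib import Path

from dotenv import dotenv_values


class Project:
    """Trimmed illustration of caching the parsed .env values."""

    def __init__(self, root):
        self.root = Path(root).resolve()

    @property
    def dotenv(self) -> Path:
        # Path to this project's .env file.
        return self.root / ".env"

    @cached_property
    def dotenv_env(self) -> dict:
        # Parsed once per Project instance instead of on every attribute
        # access; Project.refresh() clears the instance __dict__, which
        # also drops this cache.
        return dotenv_values(self.dotenv)
```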
[ { "content": "\"\"\"Meltano Projects.\"\"\"\n\n\nfrom __future__ import annotations\n\nimport errno\nimport logging\nimport os\nimport sys\nimport threading\nimport typing as t\nfrom contextlib import contextmanager\nfrom functools import cached_property\nfrom pathlib import Path\n\nimport fasteners\nfrom dotenv import dotenv_values\n\nfrom meltano.core import yaml\nfrom meltano.core.behavior.versioned import Versioned\nfrom meltano.core.config_service import ConfigService\nfrom meltano.core.environment import Environment\nfrom meltano.core.error import (\n EmptyMeltanoFileException,\n ProjectNotFound,\n ProjectReadonly,\n)\nfrom meltano.core.hub import MeltanoHubService\nfrom meltano.core.project_files import ProjectFiles\nfrom meltano.core.project_plugins_service import ProjectPluginsService\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.utils import makedirs, sanitize_filename, truthy\n\nif t.TYPE_CHECKING:\n from meltano.core.meltano_file import MeltanoFile as MeltanoFileTypeHint\n from meltano.core.plugin.base import PluginRef\n\n\nlogger = logging.getLogger(__name__)\n\n\nPROJECT_ROOT_ENV = \"MELTANO_PROJECT_ROOT\"\nPROJECT_ENVIRONMENT_ENV = \"MELTANO_ENVIRONMENT\"\nPROJECT_READONLY_ENV = \"MELTANO_PROJECT_READONLY\"\nPROJECT_SYS_DIR_ROOT_ENV = \"MELTANO_SYS_DIR_ROOT\"\n\n\ndef walk_parent_directories():\n \"\"\"Yield each directory starting with the current up to the root.\n\n Yields:\n parent directories\n \"\"\"\n directory = os.getcwd()\n while True:\n yield directory\n\n parent_directory = os.path.dirname(directory)\n if parent_directory == directory:\n return\n directory = parent_directory\n\n\nclass Project(Versioned): # noqa: WPS214\n \"\"\"Represents a Meltano project.\"\"\"\n\n __version__ = 1\n _activate_lock = threading.Lock()\n _find_lock = threading.Lock()\n _meltano_rw_lock = fasteners.ReaderWriterLock()\n _default = None\n\n def __init__(\n self,\n root: os.PathLike,\n environment: Environment | None = None,\n readonly: bool = False,\n ):\n \"\"\"Initialize a `Project` instance.\n\n Args:\n root: The root directory of the project.\n environment: The active Meltano environment.\n readonly: Whether the project is in read-only mode.\n \"\"\"\n self.root = Path(root).resolve()\n self.environment: Environment | None = environment\n self.readonly = readonly\n self.sys_dir_root = Path(\n os.getenv(PROJECT_SYS_DIR_ROOT_ENV, self.root / \".meltano\"),\n ).resolve()\n\n def refresh(self, **kwargs) -> None:\n \"\"\"Refresh the project instance to reflect external changes.\n\n This should be called whenever env vars change, project files change,\n or other significant changes to the outside world occur.\n\n Args:\n kwargs: Keyword arguments for the new instance. These overwrite the\n defaults provided by the current instance. 
For example, if a\n Meltano environment has been activated, the project can be\n refreshed with this new environment by running\n `project.refresh(environment=environment)`.\n \"\"\"\n kwargs = {\n \"root\": self.root,\n \"environment\": self.environment,\n \"readonly\": self.readonly,\n **kwargs,\n }\n cls = type(self) # noqa: WPS117\n # Clear the dictionary backing `self` to invalidate outdated info,\n # cached properties, etc., then instantiate an up-to-date instance,\n # then steal its attributes to update the dictionary backing `self`.\n # This trick makes it as if the instance was just created, yet keeps\n # all existing references to it valid.\n self.__dict__.clear()\n self.__dict__.update(cls(**kwargs).__dict__)\n\n @cached_property\n def config_service(self):\n \"\"\"Get the project config service.\n\n Returns:\n A `ConfigService` instance for this project.\n \"\"\"\n return ConfigService(self)\n\n @cached_property\n def project_files(self) -> ProjectFiles:\n \"\"\"Return a singleton `ProjectFiles` file manager instance.\n\n Returns:\n `ProjectFiles` file manager.\n \"\"\"\n return ProjectFiles(root=self.root, meltano_file_path=self.meltanofile)\n\n @cached_property\n def settings(self):\n \"\"\"Get the project settings.\n\n Returns:\n A `ProjectSettingsService` instance for this project.\n \"\"\"\n return ProjectSettingsService(self)\n\n @cached_property\n def plugins(self):\n \"\"\"Get the project plugins.\n\n Returns:\n A `ProjectPluginsService` instance for this project.\n \"\"\"\n return ProjectPluginsService(self)\n\n @cached_property\n def hub_service(self):\n \"\"\"Get the Meltano Hub service.\n\n Returns:\n A `MeltanoHubService` instance for this project.\n \"\"\"\n return MeltanoHubService(self)\n\n @cached_property\n def _meltano_interprocess_lock(self):\n return fasteners.InterProcessLock(self.run_dir(\"meltano.yml.lock\"))\n\n @property\n def env(self):\n \"\"\"Get environment variables for this project.\n\n Returns:\n dict of environment variables and values for this project.\n \"\"\"\n environment_name = self.environment.name if self.environment else \"\"\n return {\n PROJECT_ROOT_ENV: str(self.root),\n PROJECT_ENVIRONMENT_ENV: environment_name,\n PROJECT_SYS_DIR_ROOT_ENV: str(self.sys_dir_root),\n }\n\n @classmethod\n @fasteners.locked(lock=\"_activate_lock\")\n def activate(cls, project: Project):\n \"\"\"Activate the given Project.\n\n Args:\n project: the Project to activate\n\n Raises:\n OSError: if project cannot be activated due to unsupported OS\n \"\"\"\n import ctypes\n\n project.ensure_compatible()\n\n # create a symlink to our current binary\n try:\n # check if running on Windows\n if os.name == \"nt\":\n executable = Path(sys.executable).parent / \"meltano.exe\"\n # Admin privileges are required to create symlinks on Windows\n if ctypes.windll.shell32.IsUserAnAdmin():\n if executable.is_file():\n project.run_dir().joinpath(\"bin\").symlink_to(executable)\n else:\n logger.warning(\n \"Could not create symlink: meltano.exe not \"\n f\"present in {str(Path(sys.executable).parent)}\",\n )\n else:\n logger.warning(\n \"Failed to create symlink to 'meltano.exe': \"\n \"administrator privilege required\",\n )\n else:\n executable = Path(sys.executable).parent / \"meltano\"\n if executable.is_file():\n project.run_dir().joinpath(\"bin\").symlink_to(executable)\n except FileExistsError:\n pass\n except OSError as error:\n if error.errno == errno.EOPNOTSUPP:\n logger.warning(\n f\"Could not create symlink: {error}\\nPlease make sure \"\n \"that the underlying 
filesystem supports symlinks.\",\n )\n else:\n raise\n\n logger.debug(f\"Activated project at {project.root}\")\n\n # set the default project\n cls._default = project\n\n @classmethod\n def deactivate(cls):\n \"\"\"Deactivate the given Project.\"\"\"\n cls._default = None\n\n @property\n def file_version(self):\n \"\"\"Get the version of Meltano found in this project's meltano.yml.\n\n Returns:\n the Project's meltano version\n \"\"\"\n return self.meltano.version\n\n @classmethod\n @fasteners.locked(lock=\"_find_lock\")\n def find(cls, project_root: Path | str | None = None, activate=True):\n \"\"\"Find a Project.\n\n Args:\n project_root: The path to the root directory of the project. If not\n supplied, infer from PROJECT_ROOT_ENV or the current working\n directory and it's parents.\n activate: Save the found project so that future calls to `find`\n will continue to use this project.\n\n Returns:\n the found project\n\n Raises:\n ProjectNotFound: if the provided `project_root` is not a Meltano\n project, or the current working directory is not a Meltano\n project or a subfolder of one.\n \"\"\"\n if cls._default:\n return cls._default\n\n readonly = truthy(os.getenv(PROJECT_READONLY_ENV, \"false\"))\n\n if project_root := project_root or os.getenv(PROJECT_ROOT_ENV):\n project = Project(project_root, readonly=readonly)\n if not project.meltanofile.exists():\n raise ProjectNotFound(project)\n else:\n for directory in walk_parent_directories():\n project = Project(directory, readonly=readonly)\n if project.meltanofile.exists():\n break\n if not project.meltanofile.exists():\n raise ProjectNotFound(Project(os.getcwd()))\n\n readonly = project.settings.get(\"project_readonly\")\n if readonly != project.readonly:\n project.refresh(readonly=readonly)\n\n if activate:\n cls.activate(project)\n\n return project\n\n @property\n def meltano(self) -> MeltanoFileTypeHint:\n \"\"\"Return a copy of the current meltano config.\n\n Raises:\n EmptyMeltanoFileException: The `meltano.yml` file is empty.\n\n Returns:\n The current meltano config.\n \"\"\"\n from meltano.core.meltano_file import MeltanoFile\n\n conf: dict[str, t.Any] = yaml.load(self.meltanofile)\n if conf is None:\n raise EmptyMeltanoFileException\n\n with self._meltano_rw_lock.read_lock():\n return MeltanoFile.parse(self.project_files.load())\n\n @contextmanager\n def meltano_update(self):\n \"\"\"Yield the current meltano configuration.\n\n Update the meltanofile if the context ends gracefully.\n\n Yields:\n the current meltano configuration\n\n Raises:\n ProjectReadonly: This project is readonly.\n Exception: The project files could not be updated.\n \"\"\"\n if self.readonly:\n raise ProjectReadonly\n\n from meltano.core.meltano_file import MeltanoFile\n\n with self._meltano_rw_lock.write_lock(), self._meltano_interprocess_lock:\n meltano_config = MeltanoFile.parse(self.project_files.load())\n yield meltano_config\n try:\n self.project_files.update(meltano_config.canonical())\n except Exception as err:\n logger.critical(\"Could not update meltano.yml: %s\", err) # noqa: WPS323\n raise\n\n self.refresh()\n\n def root_dir(self, *joinpaths):\n \"\"\"Return the root directory of this project, optionally joined with path.\n\n Args:\n joinpaths: list of subdirs and/or file to join to project root.\n\n Returns:\n project root joined with provided subdirs and/or file\n \"\"\"\n return self.root.joinpath(*joinpaths)\n\n @property\n def meltanofile(self):\n \"\"\"Get the path to this project's meltano.yml.\n\n Returns:\n the path to this project 
meltano.yml\n \"\"\"\n return self.root.joinpath(\"meltano.yml\")\n\n @property\n def dotenv(self):\n \"\"\"Get the path to this project's .env file.\n\n Returns:\n the path to this project's .env file\n \"\"\"\n return self.root.joinpath(\".env\")\n\n @property\n def dotenv_env(self):\n \"\"\"Get values from this project's .env file.\n\n Returns:\n values found in this project's .env file\n \"\"\"\n return dotenv_values(self.dotenv)\n\n def activate_environment(self, name: str) -> None:\n \"\"\"Activate a Meltano environment.\n\n No-op if the active environment has the given name.\n\n Args:\n name: Name of the environment.\n \"\"\"\n if getattr(self.environment, \"name\", object()) != name:\n self.refresh(environment=Environment.find(self.meltano.environments, name))\n logger.info(f\"Environment {name!r} is active\")\n\n def deactivate_environment(self) -> None:\n \"\"\"Deactivate the currently active environment.\"\"\"\n if self.environment is not None:\n self.refresh(environment=None)\n\n @contextmanager\n def dotenv_update(self):\n \"\"\"Raise error if project is readonly.\n\n Used in context where .env files would be updated.\n\n Yields:\n the .env file\n\n Raises:\n ProjectReadonly: if the project is readonly\n \"\"\"\n if self.readonly:\n raise ProjectReadonly\n\n yield self.dotenv\n self.refresh()\n\n @makedirs\n def meltano_dir(self, *joinpaths):\n \"\"\"Path to the project `.meltano` directory.\n\n Args:\n joinpaths: Paths to join to the `.meltano` directory.\n\n Returns:\n Resolved path to `.meltano` dir optionally joined to given paths.\n \"\"\"\n return self.sys_dir_root.joinpath(*joinpaths)\n\n @makedirs\n def analyze_dir(self, *joinpaths):\n \"\"\"Path to the project `analyze` directory.\n\n Args:\n joinpaths: Paths to join to the `analyze` directory.\n\n Returns:\n Resolved path to `analyze` dir optionally joined to given paths.\n \"\"\"\n return self.root_dir(\"analyze\", *joinpaths)\n\n @makedirs\n def extract_dir(self, *joinpaths):\n \"\"\"Path to the project `extract` directory.\n\n Args:\n joinpaths: Paths to join to the `extract` directory.\n\n Returns:\n Resolved path to `extract` dir optionally joined to given paths.\n \"\"\"\n return self.root_dir(\"extract\", *joinpaths)\n\n @makedirs\n def venvs_dir(self, *prefixes):\n \"\"\"Path to a `venv` directory in `.meltano`.\n\n Args:\n prefixes: Paths to prepend to the `venv` directory in `.meltano`.\n\n Returns:\n Resolved path to `venv` dir optionally prepended with given prefixes.\n \"\"\"\n return self.meltano_dir(*prefixes, \"venv\")\n\n @makedirs\n def run_dir(self, *joinpaths):\n \"\"\"Path to the `run` directory in `.meltano`.\n\n Args:\n joinpaths: Paths to join to the `run` directory in `.meltano`.\n\n Returns:\n Resolved path to `run` dir optionally joined to given paths.\n \"\"\"\n return self.meltano_dir(\"run\", *joinpaths)\n\n @makedirs\n def logs_dir(self, *joinpaths):\n \"\"\"Path to the `logs` directory in `.meltano`.\n\n Args:\n joinpaths: Paths to join to the `logs` directory in `.meltano`.\n\n Returns:\n Resolved path to `logs` dir optionally joined to given paths.\n \"\"\"\n return self.meltano_dir(\"logs\", *joinpaths)\n\n @makedirs\n def job_dir(self, state_id, *joinpaths):\n \"\"\"Path to the `elt` directory in `.meltano/run`.\n\n Args:\n state_id: State ID of `run` dir.\n joinpaths: Paths to join to the `elt` directory in `.meltano`.\n\n Returns:\n Resolved path to `elt` dir optionally joined to given paths.\n \"\"\"\n return self.run_dir(\"elt\", sanitize_filename(state_id), *joinpaths)\n\n 
@makedirs\n def job_logs_dir(self, state_id, *joinpaths):\n \"\"\"Path to the `elt` directory in `.meltano/logs`.\n\n Args:\n state_id: State ID of `logs` dir.\n joinpaths: Paths to join to the `elt` directory in `.meltano/logs`.\n\n Returns:\n Resolved path to `elt` dir optionally joined to given paths.\n \"\"\"\n return self.logs_dir(\"elt\", sanitize_filename(state_id), *joinpaths)\n\n @makedirs\n def plugin_dir(self, plugin: PluginRef, *joinpaths):\n \"\"\"Path to the plugin installation directory in `.meltano`.\n\n Args:\n plugin: Plugin to retrieve or create directory for.\n joinpaths: Paths to join to the plugin installation directory in `.meltano`.\n\n Returns:\n Resolved path to plugin installation dir optionally joined to given paths.\n \"\"\"\n return self.meltano_dir(plugin.type, plugin.name, *joinpaths)\n\n @makedirs\n def root_plugins_dir(self, *joinpaths: str):\n \"\"\"Path to the project `plugins` directory.\n\n Args:\n joinpaths: Paths to join with the project `plugins` directory.\n\n Returns:\n Path to the project `plugins` directory.\n \"\"\"\n return self.root_dir(\"plugins\", *joinpaths)\n\n @makedirs\n def plugin_lock_path(\n self,\n plugin_type: str,\n plugin_name: str,\n variant_name: str | None = None,\n ):\n \"\"\"Path to the project lock file.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant_name: The plugin variant name.\n\n Returns:\n Path to the plugin lock file.\n \"\"\"\n filename = f\"{plugin_name}\"\n\n if variant_name:\n filename = f\"{filename}--{variant_name}\"\n\n return self.root_plugins_dir(plugin_type, f\"{filename}.lock\")\n\n def __eq__(self, other):\n \"\"\"Project equivalence check.\n\n Args:\n other: The other Project instance to check against.\n\n Returns:\n True if Projects are equal.\n \"\"\"\n return self.root == getattr(other, \"root\", object())\n\n def __hash__(self):\n \"\"\"Project hash.\n\n Returns:\n Project hash.\n \"\"\"\n return self.root.__hash__() # noqa: WPS609\n", "path": "src/meltano/core/project.py" } ]
[ { "content": "\"\"\"Meltano Projects.\"\"\"\n\n\nfrom __future__ import annotations\n\nimport errno\nimport logging\nimport os\nimport sys\nimport threading\nimport typing as t\nfrom contextlib import contextmanager\nfrom functools import cached_property\nfrom pathlib import Path\n\nimport fasteners\nfrom dotenv import dotenv_values\n\nfrom meltano.core import yaml\nfrom meltano.core.behavior.versioned import Versioned\nfrom meltano.core.config_service import ConfigService\nfrom meltano.core.environment import Environment\nfrom meltano.core.error import (\n EmptyMeltanoFileException,\n ProjectNotFound,\n ProjectReadonly,\n)\nfrom meltano.core.hub import MeltanoHubService\nfrom meltano.core.project_files import ProjectFiles\nfrom meltano.core.project_plugins_service import ProjectPluginsService\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.utils import makedirs, sanitize_filename, truthy\n\nif t.TYPE_CHECKING:\n from meltano.core.meltano_file import MeltanoFile as MeltanoFileTypeHint\n from meltano.core.plugin.base import PluginRef\n\n\nlogger = logging.getLogger(__name__)\n\n\nPROJECT_ROOT_ENV = \"MELTANO_PROJECT_ROOT\"\nPROJECT_ENVIRONMENT_ENV = \"MELTANO_ENVIRONMENT\"\nPROJECT_READONLY_ENV = \"MELTANO_PROJECT_READONLY\"\nPROJECT_SYS_DIR_ROOT_ENV = \"MELTANO_SYS_DIR_ROOT\"\n\n\ndef walk_parent_directories():\n \"\"\"Yield each directory starting with the current up to the root.\n\n Yields:\n parent directories\n \"\"\"\n directory = os.getcwd()\n while True:\n yield directory\n\n parent_directory = os.path.dirname(directory)\n if parent_directory == directory:\n return\n directory = parent_directory\n\n\nclass Project(Versioned): # noqa: WPS214\n \"\"\"Represents a Meltano project.\"\"\"\n\n __version__ = 1\n _activate_lock = threading.Lock()\n _find_lock = threading.Lock()\n _meltano_rw_lock = fasteners.ReaderWriterLock()\n _default = None\n\n def __init__(\n self,\n root: os.PathLike,\n environment: Environment | None = None,\n readonly: bool = False,\n ):\n \"\"\"Initialize a `Project` instance.\n\n Args:\n root: The root directory of the project.\n environment: The active Meltano environment.\n readonly: Whether the project is in read-only mode.\n \"\"\"\n self.root = Path(root).resolve()\n self.environment: Environment | None = environment\n self.readonly = readonly\n self.sys_dir_root = Path(\n os.getenv(PROJECT_SYS_DIR_ROOT_ENV, self.root / \".meltano\"),\n ).resolve()\n\n def refresh(self, **kwargs) -> None:\n \"\"\"Refresh the project instance to reflect external changes.\n\n This should be called whenever env vars change, project files change,\n or other significant changes to the outside world occur.\n\n Args:\n kwargs: Keyword arguments for the new instance. These overwrite the\n defaults provided by the current instance. 
For example, if a\n Meltano environment has been activated, the project can be\n refreshed with this new environment by running\n `project.refresh(environment=environment)`.\n \"\"\"\n kwargs = {\n \"root\": self.root,\n \"environment\": self.environment,\n \"readonly\": self.readonly,\n **kwargs,\n }\n cls = type(self) # noqa: WPS117\n # Clear the dictionary backing `self` to invalidate outdated info,\n # cached properties, etc., then instantiate an up-to-date instance,\n # then steal its attributes to update the dictionary backing `self`.\n # This trick makes it as if the instance was just created, yet keeps\n # all existing references to it valid.\n self.__dict__.clear()\n self.__dict__.update(cls(**kwargs).__dict__)\n\n @cached_property\n def config_service(self):\n \"\"\"Get the project config service.\n\n Returns:\n A `ConfigService` instance for this project.\n \"\"\"\n return ConfigService(self)\n\n @cached_property\n def project_files(self) -> ProjectFiles:\n \"\"\"Return a singleton `ProjectFiles` file manager instance.\n\n Returns:\n `ProjectFiles` file manager.\n \"\"\"\n return ProjectFiles(root=self.root, meltano_file_path=self.meltanofile)\n\n @cached_property\n def settings(self):\n \"\"\"Get the project settings.\n\n Returns:\n A `ProjectSettingsService` instance for this project.\n \"\"\"\n return ProjectSettingsService(self)\n\n @cached_property\n def plugins(self):\n \"\"\"Get the project plugins.\n\n Returns:\n A `ProjectPluginsService` instance for this project.\n \"\"\"\n return ProjectPluginsService(self)\n\n @cached_property\n def hub_service(self):\n \"\"\"Get the Meltano Hub service.\n\n Returns:\n A `MeltanoHubService` instance for this project.\n \"\"\"\n return MeltanoHubService(self)\n\n @cached_property\n def _meltano_interprocess_lock(self):\n return fasteners.InterProcessLock(self.run_dir(\"meltano.yml.lock\"))\n\n @property\n def env(self):\n \"\"\"Get environment variables for this project.\n\n Returns:\n dict of environment variables and values for this project.\n \"\"\"\n environment_name = self.environment.name if self.environment else \"\"\n return {\n PROJECT_ROOT_ENV: str(self.root),\n PROJECT_ENVIRONMENT_ENV: environment_name,\n PROJECT_SYS_DIR_ROOT_ENV: str(self.sys_dir_root),\n }\n\n @classmethod\n @fasteners.locked(lock=\"_activate_lock\")\n def activate(cls, project: Project):\n \"\"\"Activate the given Project.\n\n Args:\n project: the Project to activate\n\n Raises:\n OSError: if project cannot be activated due to unsupported OS\n \"\"\"\n import ctypes\n\n project.ensure_compatible()\n\n # create a symlink to our current binary\n try:\n # check if running on Windows\n if os.name == \"nt\":\n executable = Path(sys.executable).parent / \"meltano.exe\"\n # Admin privileges are required to create symlinks on Windows\n if ctypes.windll.shell32.IsUserAnAdmin():\n if executable.is_file():\n project.run_dir().joinpath(\"bin\").symlink_to(executable)\n else:\n logger.warning(\n \"Could not create symlink: meltano.exe not \"\n f\"present in {str(Path(sys.executable).parent)}\",\n )\n else:\n logger.warning(\n \"Failed to create symlink to 'meltano.exe': \"\n \"administrator privilege required\",\n )\n else:\n executable = Path(sys.executable).parent / \"meltano\"\n if executable.is_file():\n project.run_dir().joinpath(\"bin\").symlink_to(executable)\n except FileExistsError:\n pass\n except OSError as error:\n if error.errno == errno.EOPNOTSUPP:\n logger.warning(\n f\"Could not create symlink: {error}\\nPlease make sure \"\n \"that the underlying 
filesystem supports symlinks.\",\n )\n else:\n raise\n\n logger.debug(f\"Activated project at {project.root}\")\n\n # set the default project\n cls._default = project\n\n @classmethod\n def deactivate(cls):\n \"\"\"Deactivate the given Project.\"\"\"\n cls._default = None\n\n @property\n def file_version(self):\n \"\"\"Get the version of Meltano found in this project's meltano.yml.\n\n Returns:\n the Project's meltano version\n \"\"\"\n return self.meltano.version\n\n @classmethod\n @fasteners.locked(lock=\"_find_lock\")\n def find(cls, project_root: Path | str | None = None, activate=True):\n \"\"\"Find a Project.\n\n Args:\n project_root: The path to the root directory of the project. If not\n supplied, infer from PROJECT_ROOT_ENV or the current working\n directory and it's parents.\n activate: Save the found project so that future calls to `find`\n will continue to use this project.\n\n Returns:\n the found project\n\n Raises:\n ProjectNotFound: if the provided `project_root` is not a Meltano\n project, or the current working directory is not a Meltano\n project or a subfolder of one.\n \"\"\"\n if cls._default:\n return cls._default\n\n readonly = truthy(os.getenv(PROJECT_READONLY_ENV, \"false\"))\n\n if project_root := project_root or os.getenv(PROJECT_ROOT_ENV):\n project = Project(project_root, readonly=readonly)\n if not project.meltanofile.exists():\n raise ProjectNotFound(project)\n else:\n for directory in walk_parent_directories():\n project = Project(directory, readonly=readonly)\n if project.meltanofile.exists():\n break\n if not project.meltanofile.exists():\n raise ProjectNotFound(Project(os.getcwd()))\n\n readonly = project.settings.get(\"project_readonly\")\n if readonly != project.readonly:\n project.refresh(readonly=readonly)\n\n if activate:\n cls.activate(project)\n\n return project\n\n @property\n def meltano(self) -> MeltanoFileTypeHint:\n \"\"\"Return a copy of the current meltano config.\n\n Raises:\n EmptyMeltanoFileException: The `meltano.yml` file is empty.\n\n Returns:\n The current meltano config.\n \"\"\"\n from meltano.core.meltano_file import MeltanoFile\n\n conf: dict[str, t.Any] = yaml.load(self.meltanofile)\n if conf is None:\n raise EmptyMeltanoFileException\n\n with self._meltano_rw_lock.read_lock():\n return MeltanoFile.parse(self.project_files.load())\n\n @contextmanager\n def meltano_update(self):\n \"\"\"Yield the current meltano configuration.\n\n Update the meltanofile if the context ends gracefully.\n\n Yields:\n the current meltano configuration\n\n Raises:\n ProjectReadonly: This project is readonly.\n Exception: The project files could not be updated.\n \"\"\"\n if self.readonly:\n raise ProjectReadonly\n\n from meltano.core.meltano_file import MeltanoFile\n\n with self._meltano_rw_lock.write_lock(), self._meltano_interprocess_lock:\n meltano_config = MeltanoFile.parse(self.project_files.load())\n yield meltano_config\n try:\n self.project_files.update(meltano_config.canonical())\n except Exception as err:\n logger.critical(\"Could not update meltano.yml: %s\", err) # noqa: WPS323\n raise\n\n self.refresh()\n\n def root_dir(self, *joinpaths):\n \"\"\"Return the root directory of this project, optionally joined with path.\n\n Args:\n joinpaths: list of subdirs and/or file to join to project root.\n\n Returns:\n project root joined with provided subdirs and/or file\n \"\"\"\n return self.root.joinpath(*joinpaths)\n\n @property\n def meltanofile(self):\n \"\"\"Get the path to this project's meltano.yml.\n\n Returns:\n the path to this project 
meltano.yml\n \"\"\"\n return self.root.joinpath(\"meltano.yml\")\n\n @property\n def dotenv(self):\n \"\"\"Get the path to this project's .env file.\n\n Returns:\n the path to this project's .env file\n \"\"\"\n return self.root.joinpath(\".env\")\n\n @cached_property\n def dotenv_env(self):\n \"\"\"Get values from this project's .env file.\n\n Returns:\n values found in this project's .env file\n \"\"\"\n return dotenv_values(self.dotenv)\n\n def activate_environment(self, name: str) -> None:\n \"\"\"Activate a Meltano environment.\n\n No-op if the active environment has the given name.\n\n Args:\n name: Name of the environment.\n \"\"\"\n if getattr(self.environment, \"name\", object()) != name:\n self.refresh(environment=Environment.find(self.meltano.environments, name))\n logger.info(f\"Environment {name!r} is active\")\n\n def deactivate_environment(self) -> None:\n \"\"\"Deactivate the currently active environment.\"\"\"\n if self.environment is not None:\n self.refresh(environment=None)\n\n @contextmanager\n def dotenv_update(self):\n \"\"\"Raise error if project is readonly.\n\n Used in context where .env files would be updated.\n\n Yields:\n the .env file\n\n Raises:\n ProjectReadonly: if the project is readonly\n \"\"\"\n if self.readonly:\n raise ProjectReadonly\n\n yield self.dotenv\n self.refresh()\n\n @makedirs\n def meltano_dir(self, *joinpaths):\n \"\"\"Path to the project `.meltano` directory.\n\n Args:\n joinpaths: Paths to join to the `.meltano` directory.\n\n Returns:\n Resolved path to `.meltano` dir optionally joined to given paths.\n \"\"\"\n return self.sys_dir_root.joinpath(*joinpaths)\n\n @makedirs\n def analyze_dir(self, *joinpaths):\n \"\"\"Path to the project `analyze` directory.\n\n Args:\n joinpaths: Paths to join to the `analyze` directory.\n\n Returns:\n Resolved path to `analyze` dir optionally joined to given paths.\n \"\"\"\n return self.root_dir(\"analyze\", *joinpaths)\n\n @makedirs\n def extract_dir(self, *joinpaths):\n \"\"\"Path to the project `extract` directory.\n\n Args:\n joinpaths: Paths to join to the `extract` directory.\n\n Returns:\n Resolved path to `extract` dir optionally joined to given paths.\n \"\"\"\n return self.root_dir(\"extract\", *joinpaths)\n\n @makedirs\n def venvs_dir(self, *prefixes):\n \"\"\"Path to a `venv` directory in `.meltano`.\n\n Args:\n prefixes: Paths to prepend to the `venv` directory in `.meltano`.\n\n Returns:\n Resolved path to `venv` dir optionally prepended with given prefixes.\n \"\"\"\n return self.meltano_dir(*prefixes, \"venv\")\n\n @makedirs\n def run_dir(self, *joinpaths):\n \"\"\"Path to the `run` directory in `.meltano`.\n\n Args:\n joinpaths: Paths to join to the `run` directory in `.meltano`.\n\n Returns:\n Resolved path to `run` dir optionally joined to given paths.\n \"\"\"\n return self.meltano_dir(\"run\", *joinpaths)\n\n @makedirs\n def logs_dir(self, *joinpaths):\n \"\"\"Path to the `logs` directory in `.meltano`.\n\n Args:\n joinpaths: Paths to join to the `logs` directory in `.meltano`.\n\n Returns:\n Resolved path to `logs` dir optionally joined to given paths.\n \"\"\"\n return self.meltano_dir(\"logs\", *joinpaths)\n\n @makedirs\n def job_dir(self, state_id, *joinpaths):\n \"\"\"Path to the `elt` directory in `.meltano/run`.\n\n Args:\n state_id: State ID of `run` dir.\n joinpaths: Paths to join to the `elt` directory in `.meltano`.\n\n Returns:\n Resolved path to `elt` dir optionally joined to given paths.\n \"\"\"\n return self.run_dir(\"elt\", sanitize_filename(state_id), 
*joinpaths)\n\n @makedirs\n def job_logs_dir(self, state_id, *joinpaths):\n \"\"\"Path to the `elt` directory in `.meltano/logs`.\n\n Args:\n state_id: State ID of `logs` dir.\n joinpaths: Paths to join to the `elt` directory in `.meltano/logs`.\n\n Returns:\n Resolved path to `elt` dir optionally joined to given paths.\n \"\"\"\n return self.logs_dir(\"elt\", sanitize_filename(state_id), *joinpaths)\n\n @makedirs\n def plugin_dir(self, plugin: PluginRef, *joinpaths):\n \"\"\"Path to the plugin installation directory in `.meltano`.\n\n Args:\n plugin: Plugin to retrieve or create directory for.\n joinpaths: Paths to join to the plugin installation directory in `.meltano`.\n\n Returns:\n Resolved path to plugin installation dir optionally joined to given paths.\n \"\"\"\n return self.meltano_dir(plugin.type, plugin.name, *joinpaths)\n\n @makedirs\n def root_plugins_dir(self, *joinpaths: str):\n \"\"\"Path to the project `plugins` directory.\n\n Args:\n joinpaths: Paths to join with the project `plugins` directory.\n\n Returns:\n Path to the project `plugins` directory.\n \"\"\"\n return self.root_dir(\"plugins\", *joinpaths)\n\n @makedirs\n def plugin_lock_path(\n self,\n plugin_type: str,\n plugin_name: str,\n variant_name: str | None = None,\n ):\n \"\"\"Path to the project lock file.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant_name: The plugin variant name.\n\n Returns:\n Path to the plugin lock file.\n \"\"\"\n filename = f\"{plugin_name}\"\n\n if variant_name:\n filename = f\"{filename}--{variant_name}\"\n\n return self.root_plugins_dir(plugin_type, f\"{filename}.lock\")\n\n def __eq__(self, other):\n \"\"\"Project equivalence check.\n\n Args:\n other: The other Project instance to check against.\n\n Returns:\n True if Projects are equal.\n \"\"\"\n return self.root == getattr(other, \"root\", object())\n\n def __hash__(self):\n \"\"\"Project hash.\n\n Returns:\n Project hash.\n \"\"\"\n return self.root.__hash__() # noqa: WPS609\n", "path": "src/meltano/core/project.py" } ]
diff --git a/src/meltano/core/project.py b/src/meltano/core/project.py index 32a67f9ab5..869e88b1c4 100644 --- a/src/meltano/core/project.py +++ b/src/meltano/core/project.py @@ -372,7 +372,7 @@ def dotenv(self): """ return self.root.joinpath(".env") - @property + @cached_property def dotenv_env(self): """Get values from this project's .env file. diff --git a/tests/meltano/core/plugin/test_plugin_settings.py b/tests/meltano/core/plugin/test_plugin_settings.py index cb6dec7527..bbc61236d7 100644 --- a/tests/meltano/core/plugin/test_plugin_settings.py +++ b/tests/meltano/core/plugin/test_plugin_settings.py @@ -347,9 +347,9 @@ def test_as_env_custom( @pytest.mark.usefixtures("env_var") def test_namespace_as_env_prefix( self, - project, + project: Project, session, - target, + target: ProjectPlugin, plugin_settings_service_factory, ): subject = plugin_settings_service_factory(target) @@ -376,6 +376,7 @@ def assert_env_value(value, env_var): # Name prefix dotenv.unset_key(project.dotenv, "MOCK_SCHEMA") dotenv.set_key(project.dotenv, "TARGET_MOCK_SCHEMA", "name_prefix") + project.refresh() assert_env_value("name_prefix", "TARGET_MOCK_SCHEMA") config = subject.as_env(session=session) @@ -486,7 +487,7 @@ def test_store_meltano_yml(self, subject, project): @pytest.mark.order(2) @pytest.mark.usefixtures("tap") - def test_store_dotenv(self, subject, project): + def test_store_dotenv(self, subject: PluginSettingsService, project: Project): store = SettingValueStore.DOTENV assert not project.dotenv.exists() @@ -507,6 +508,7 @@ def test_store_dotenv(self, subject, project): ) dotenv.set_key(project.dotenv, "TAP_MOCK_BOOLEAN", "false") + project.refresh() assert subject.get_with_source("boolean") == (False, SettingValueStore.DOTENV) dotenv.unset_key(project.dotenv, "TAP_MOCK_BOOLEAN") @@ -548,8 +550,8 @@ def test_store_dotenv(self, subject, project): def test_env_var_expansion( self, session, - subject, - project, + subject: PluginSettingsService, + project: Project, monkeypatch, env_var, ): @@ -564,6 +566,7 @@ def test_env_var_expansion( dotenv.set_key(project.dotenv, "A", "rock") dotenv.set_key(project.dotenv, "B", "paper") dotenv.set_key(project.dotenv, "C", "scissors") + project.refresh() yml_config = { "var": "$VAR", diff --git a/tests/meltano/core/test_plugin_invoker.py b/tests/meltano/core/test_plugin_invoker.py index 82ad159788..4c8021eec0 100644 --- a/tests/meltano/core/test_plugin_invoker.py +++ b/tests/meltano/core/test_plugin_invoker.py @@ -29,6 +29,7 @@ async def test_env(self, project, tap, session, plugin_invoker_factory): project.dotenv.touch() dotenv.set_key(project.dotenv, "DUMMY_ENV_VAR", "from_dotenv") dotenv.set_key(project.dotenv, "TAP_MOCK_TEST", "from_dotenv") + project.refresh() subject = plugin_invoker_factory(tap) async with subject.prepared(session):
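The diff above swaps `@property` for `@cached_property` on `Project.dotenv_env` and, in the tests, adds `project.refresh()` after each `dotenv.set_key`/`dotenv.unset_key` call. As a minimal, hypothetical analogue of why that refresh becomes necessary (the `Config` class and its `refresh` here are illustrative only, not Meltano's API — Meltano's real `Project.refresh` rebuilds the whole instance dictionary, as its docstring above describes):

```python
# Sketch: a value memoised with functools.cached_property is computed once,
# so callers must explicitly drop the cache after editing the backing file.
from functools import cached_property
from pathlib import Path


class Config:
    def __init__(self, path: Path) -> None:
        self.path = path

    @cached_property
    def values(self) -> dict[str, str]:
        # Parsed once and memoised; later edits to the file are invisible
        # until refresh() removes the cached attribute.
        pairs = (
            line.split("=", 1)
            for line in self.path.read_text().splitlines()
            if "=" in line
        )
        return {key.strip(): value.strip() for key, value in pairs}

    def refresh(self) -> None:
        # cached_property stores its result in the instance __dict__;
        # popping it forces a re-read on the next access.
        self.__dict__.pop("values", None)
```

Under this sketch, `cfg.values` would keep returning stale data after the `.env`-style file changes unless `cfg.refresh()` is called, which is presumably why the test diff inserts `project.refresh()` right after mutating the dotenv file.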
Textualize__textual-4100
Scrolling in long OptionList doesn't accompany keyboard navigation by default
If you use the keyboard to navigate inside an `OptionList` and move past the visible options, there is no scrolling to accompany your movement (there should be).

https://github.com/Textualize/textual/assets/5621605/31e02474-b696-40d7-b300-b6348ecddd6d

<details>
<summary>App shown in video</summary>

```py
from textual.app import App, ComposeResult
from textual.widgets import OptionList
from textual.widgets.option_list import Option


class ListApp(App[None]):
    def compose(self) -> ComposeResult:
        yield OptionList(*[Option(f"This is option #{n}") for n in range(100)])


if __name__ == "__main__":
    ListApp().run()
```

</details>
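For context, the fix visible in the `after_files` further below adds `max-height: 100%;` to `OptionList`'s `DEFAULT_CSS`: with `height: auto` alone the widget grows to the full height of its content, so its own `scroll_to_highlight` has nothing to scroll. As a sketch, the same rule can be applied from app-level CSS to the reproduction app above (assuming a Textual version that predates the fix; the CSS rule simply mirrors the one added in the diff):

```python
from textual.app import App, ComposeResult
from textual.widgets import OptionList
from textual.widgets.option_list import Option


class ListApp(App[None]):
    # Cap the list at the screen height so the widget's own scrolling
    # (and scroll_to_highlight) keeps the highlighted option in view.
    CSS = """
    OptionList {
        max-height: 100%;
    }
    """

    def compose(self) -> ComposeResult:
        yield OptionList(*[Option(f"This is option #{n}") for n in range(100)])


if __name__ == "__main__":
    ListApp().run()
```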
[ { "content": "\"\"\"Provides the core of a classic vertical bounce-bar option list.\n\nUseful as a lightweight list view (not to be confused with ListView, which\nis much richer but uses widgets for the items) and as the base for various\nforms of bounce-bar menu.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import ClassVar, Iterable, NamedTuple\n\nfrom rich.console import RenderableType\nfrom rich.padding import Padding\nfrom rich.repr import Result\nfrom rich.rule import Rule\nfrom rich.style import Style\nfrom typing_extensions import Self, TypeAlias\n\nfrom .. import _widget_navigation\nfrom .._widget_navigation import Direction\nfrom ..binding import Binding, BindingType\nfrom ..events import Click, Idle, Leave, MouseMove\nfrom ..geometry import Region, Size\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..scroll_view import ScrollView\nfrom ..strip import Strip\n\n\nclass DuplicateID(Exception):\n \"\"\"Raised if a duplicate ID is used when adding options to an option list.\"\"\"\n\n\nclass OptionDoesNotExist(Exception):\n \"\"\"Raised when a request has been made for an option that doesn't exist.\"\"\"\n\n\nclass Option:\n \"\"\"Class that holds the details of an individual option.\"\"\"\n\n def __init__(\n self, prompt: RenderableType, id: str | None = None, disabled: bool = False\n ) -> None:\n \"\"\"Initialise the option.\n\n Args:\n prompt: The prompt for the option.\n id: The optional ID for the option.\n disabled: The initial enabled/disabled state. Enabled by default.\n \"\"\"\n self.__prompt = prompt\n self.__id = id\n self.disabled = disabled\n\n @property\n def prompt(self) -> RenderableType:\n \"\"\"The prompt for the option.\"\"\"\n return self.__prompt\n\n def set_prompt(self, prompt: RenderableType) -> None:\n \"\"\"Set the prompt for the option.\n\n Args:\n prompt: The new prompt for the option.\n \"\"\"\n self.__prompt = prompt\n\n @property\n def id(self) -> str | None:\n \"\"\"The optional ID for the option.\"\"\"\n return self.__id\n\n def __rich_repr__(self) -> Result:\n yield \"prompt\", self.prompt\n yield \"id\", self.id, None\n yield \"disabled\", self.disabled, False\n\n\nclass Separator:\n \"\"\"Class used to add a separator to an [OptionList][textual.widgets.OptionList].\"\"\"\n\n\nclass Line(NamedTuple):\n \"\"\"Class that holds a list of segments for the line of a option.\"\"\"\n\n segments: Strip\n \"\"\"The strip of segments that make up the line.\"\"\"\n\n option_index: int | None = None\n \"\"\"The index of the [Option][textual.widgets.option_list.Option] that this line is related to.\n\n If the line isn't related to an option this will be `None`.\n \"\"\"\n\n\nclass OptionLineSpan(NamedTuple):\n \"\"\"Class that holds the line span information for an option.\n\n An [Option][textual.widgets.option_list.Option] can have a prompt that\n spans multiple lines. Also, there's no requirement that every option in\n an option list has the same span information. So this structure is used\n to track the line that an option starts on, and how many lines it\n contains.\n \"\"\"\n\n first: int\n \"\"\"The line position for the start of the option..\"\"\"\n line_count: int\n \"\"\"The count of lines that make up the option.\"\"\"\n\n def __contains__(self, line: object) -> bool:\n # For this named tuple `in` will have a very specific meaning; but\n # to keep mypy and friends happy we need to accept an object as the\n # parameter. 
So, let's keep the type checkers happy but only accept\n # an int.\n assert isinstance(line, int)\n return line >= self.first and line < (self.first + self.line_count)\n\n\nOptionListContent: TypeAlias = \"Option | Separator\"\n\"\"\"The type of an item of content in the option list.\n\nThis type represents all of the types that will be found in the list of\ncontent of the option list after it has been processed for addition.\n\"\"\"\n\nNewOptionListContent: TypeAlias = \"OptionListContent | None | RenderableType\"\n\"\"\"The type of a new item of option list content to be added to an option list.\n\nThis type represents all of the types that will be accepted when adding new\ncontent to the option list. This is a superset of [`OptionListContent`][textual.types.OptionListContent].\n\"\"\"\n\n\nclass OptionList(ScrollView, can_focus=True):\n \"\"\"A vertical option list with bounce-bar highlighting.\"\"\"\n\n BINDINGS: ClassVar[list[BindingType]] = [\n Binding(\"down\", \"cursor_down\", \"Down\", show=False),\n Binding(\"end\", \"last\", \"Last\", show=False),\n Binding(\"enter\", \"select\", \"Select\", show=False),\n Binding(\"home\", \"first\", \"First\", show=False),\n Binding(\"pagedown\", \"page_down\", \"Page Down\", show=False),\n Binding(\"pageup\", \"page_up\", \"Page Up\", show=False),\n Binding(\"up\", \"cursor_up\", \"Up\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | down | Move the highlight down. |\n | end | Move the highlight to the last option. |\n | enter | Select the current option. |\n | home | Move the highlight to the first option. |\n | pagedown | Move the highlight down a page of options. |\n | pageup | Move the highlight up a page of options. |\n | up | Move the highlight up. |\n \"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\n \"option-list--option\",\n \"option-list--option-disabled\",\n \"option-list--option-highlighted\",\n \"option-list--option-hover\",\n \"option-list--option-hover-highlighted\",\n \"option-list--separator\",\n }\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `option-list--option-disabled` | Target disabled options. |\n | `option-list--option-highlighted` | Target the highlighted option. |\n | `option-list--option-hover` | Target an option that has the mouse over it. |\n | `option-list--option-hover-highlighted` | Target a highlighted option that has the mouse over it. |\n | `option-list--separator` | Target the separators. 
|\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n OptionList {\n height: auto;\n background: $boost;\n color: $text;\n overflow-x: hidden;\n border: tall transparent;\n padding: 0 1;\n }\n\n OptionList:focus {\n border: tall $accent;\n\n }\n\n OptionList > .option-list--separator {\n color: $foreground 15%;\n }\n\n OptionList > .option-list--option-highlighted {\n color: $text;\n text-style: bold;\n }\n\n OptionList:focus > .option-list--option-highlighted {\n background: $accent;\n }\n\n OptionList > .option-list--option-disabled {\n color: $text-disabled;\n }\n\n OptionList > .option-list--option-hover {\n background: $boost;\n }\n\n OptionList > .option-list--option-hover-highlighted {\n background: $accent 60%;\n color: $text;\n text-style: bold;\n }\n\n OptionList:focus > .option-list--option-hover-highlighted {\n background: $accent;\n color: $text;\n text-style: bold;\n }\n \"\"\"\n\n highlighted: reactive[int | None] = reactive[\"int | None\"](None)\n \"\"\"The index of the currently-highlighted option, or `None` if no option is highlighted.\"\"\"\n\n class OptionMessage(Message):\n \"\"\"Base class for all option messages.\"\"\"\n\n def __init__(self, option_list: OptionList, index: int) -> None:\n \"\"\"Initialise the option message.\n\n Args:\n option_list: The option list that owns the option.\n index: The index of the option that the message relates to.\n \"\"\"\n super().__init__()\n self.option_list: OptionList = option_list\n \"\"\"The option list that sent the message.\"\"\"\n self.option: Option = option_list.get_option_at_index(index)\n \"\"\"The highlighted option.\"\"\"\n self.option_id: str | None = self.option.id\n \"\"\"The ID of the option that the message relates to.\"\"\"\n self.option_index: int = index\n \"\"\"The index of the option that the message relates to.\"\"\"\n\n @property\n def control(self) -> OptionList:\n \"\"\"The option list that sent the message.\n\n This is an alias for [`OptionMessage.option_list`][textual.widgets.OptionList.OptionMessage.option_list]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.option_list\n\n def __rich_repr__(self) -> Result:\n yield \"option_list\", self.option_list\n yield \"option\", self.option\n yield \"option_id\", self.option_id\n yield \"option_index\", self.option_index\n\n class OptionHighlighted(OptionMessage):\n \"\"\"Message sent when an option is highlighted.\n\n Can be handled using `on_option_list_option_highlighted` in a subclass of\n `OptionList` or in a parent node in the DOM.\n \"\"\"\n\n class OptionSelected(OptionMessage):\n \"\"\"Message sent when an option is selected.\n\n Can be handled using `on_option_list_option_selected` in a subclass of\n `OptionList` or in a parent node in the DOM.\n \"\"\"\n\n def __init__(\n self,\n *content: NewOptionListContent,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n wrap: bool = True,\n ):\n \"\"\"Initialise the option list.\n\n Args:\n *content: The content for the option list.\n name: The name of the option list.\n id: The ID of the option list in the DOM.\n classes: The CSS classes of the option list.\n disabled: Whether the option list is disabled or not.\n wrap: Should prompts be auto-wrapped?\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n\n # Internal refresh trackers. 
For things driven from on_idle.\n self._needs_refresh_content_tracking = False\n self._needs_to_scroll_to_highlight = False\n\n self._wrap = wrap\n \"\"\"Should we auto-wrap options?\n\n If `False` options wider than the list will be truncated.\n \"\"\"\n\n self._contents: list[OptionListContent] = [\n self._make_content(item) for item in content\n ]\n \"\"\"A list of the content of the option list.\n\n This is *every* item that makes up the content of the option list;\n this includes both the options *and* the separators (and any other\n decoration we could end up adding -- although I don't anticipate\n anything else at the moment; but padding around separators could be\n a thing, perhaps).\n \"\"\"\n\n self._options: list[Option] = [\n content for content in self._contents if isinstance(content, Option)\n ]\n \"\"\"A list of the options within the option list.\n\n This is a list of references to just the options alone, ignoring the\n separators and potentially any other line-oriented option list\n content that isn't an option.\n \"\"\"\n\n self._option_ids: dict[str, int] = {\n option.id: index for index, option in enumerate(self._options) if option.id\n }\n \"\"\"A dictionary of option IDs and the option indexes they relate to.\"\"\"\n\n self._lines: list[Line] = []\n \"\"\"A list of all of the individual lines that make up the option list.\n\n Note that the size of this list will be at least the same as the number\n of options, and actually greater if any prompt of any option is\n multiple lines.\n \"\"\"\n\n self._spans: list[OptionLineSpan] = []\n \"\"\"A list of the locations and sizes of all options in the option list.\n\n This will be the same size as the number of prompts; each entry in\n the list contains the line offset of the start of the prompt, and\n the count of the lines in the prompt.\n \"\"\"\n\n # Initial calculation of the content tracking.\n self._request_content_tracking_refresh()\n\n self._mouse_hovering_over: int | None = None\n \"\"\"Used to track what the mouse is hovering over.\"\"\"\n\n # Finally, cause the highlighted property to settle down based on\n # the state of the option list in regard to its available options.\n self.action_first()\n\n def _request_content_tracking_refresh(\n self, rescroll_to_highlight: bool = False\n ) -> None:\n \"\"\"Request that the content tracking information gets refreshed.\n\n Args:\n rescroll_to_highlight: Should the widget ensure the highlight is visible?\n\n Calling this method sets a flag to say the refresh should happen,\n and books the refresh call in for the next idle moment.\n \"\"\"\n self._needs_refresh_content_tracking = True\n self._needs_to_scroll_to_highlight = rescroll_to_highlight\n self.check_idle()\n\n async def _on_idle(self, _: Idle) -> None:\n \"\"\"Perform content tracking data refresh when idle.\"\"\"\n self._refresh_content_tracking()\n if self._needs_to_scroll_to_highlight:\n self._needs_to_scroll_to_highlight = False\n self.scroll_to_highlight()\n\n def watch_show_vertical_scrollbar(self) -> None:\n \"\"\"Handle the vertical scrollbar visibility status changing.\n\n `show_vertical_scrollbar` is watched because it has an impact on the\n available width in which to render the renderables that make up the\n options in the list. 
If a vertical scrollbar appears or disappears\n we need to recalculate all the lines that make up the list.\n \"\"\"\n self._request_content_tracking_refresh()\n\n def _on_resize(self) -> None:\n \"\"\"Refresh the layout of the renderables in the list when resized.\"\"\"\n self._request_content_tracking_refresh(rescroll_to_highlight=True)\n\n def _on_mouse_move(self, event: MouseMove) -> None:\n \"\"\"React to the mouse moving.\n\n Args:\n event: The mouse movement event.\n \"\"\"\n self._mouse_hovering_over = event.style.meta.get(\"option\")\n\n def _on_leave(self, _: Leave) -> None:\n \"\"\"React to the mouse leaving the widget.\"\"\"\n self._mouse_hovering_over = None\n\n async def _on_click(self, event: Click) -> None:\n \"\"\"React to the mouse being clicked on an item.\n\n Args:\n event: The click event.\n \"\"\"\n clicked_option: int | None = event.style.meta.get(\"option\")\n if clicked_option is not None and not self._options[clicked_option].disabled:\n self.highlighted = clicked_option\n self.action_select()\n\n def _make_content(self, content: NewOptionListContent) -> OptionListContent:\n \"\"\"Convert a single item of content for the list into a content type.\n\n Args:\n content: The content to turn into a full option list type.\n\n Returns:\n The content, usable in the option list.\n \"\"\"\n if isinstance(content, (Option, Separator)):\n return content\n if content is None:\n return Separator()\n return Option(content)\n\n def _clear_content_tracking(self) -> None:\n \"\"\"Clear down the content tracking information.\"\"\"\n self._lines.clear()\n self._spans.clear()\n\n def _left_gutter_width(self) -> int:\n \"\"\"Returns the size of any left gutter that should be taken into account.\n\n Returns:\n The width of the left gutter.\n \"\"\"\n return 0\n\n def _refresh_content_tracking(self, force: bool = False) -> None:\n \"\"\"Refresh the various forms of option list content tracking.\n\n Args:\n force: Optionally force the refresh.\n\n Raises:\n DuplicateID: If there is an attempt to use a duplicate ID.\n\n Without a `force` the refresh will only take place if it has been\n requested via `_refresh_content_tracking`.\n \"\"\"\n\n # If we don't need to refresh, don't bother.\n if not self._needs_refresh_content_tracking and not force:\n return\n\n # If we don't know our own width yet, we can't sensibly work out the\n # heights of the prompts of the options yet, so let's shortcut that\n # work. 
We'll be back here once we know our height.\n if not self.size.width:\n return\n\n self._clear_content_tracking()\n self._needs_refresh_content_tracking = False\n\n # Set up for doing less property access work inside the loop.\n lines_from = self.app.console.render_lines\n add_span = self._spans.append\n add_lines = self._lines.extend\n\n # Adjust the options for our purposes.\n options = self.app.console.options.update_width(\n self.scrollable_content_region.width - self._left_gutter_width()\n )\n options.no_wrap = not self._wrap\n if not self._wrap:\n options.overflow = \"ellipsis\"\n\n # Create a rule that can be used as a separator.\n separator = Strip(lines_from(Rule(style=\"\"))[0])\n\n # Work through each item that makes up the content of the list,\n # break out the individual lines that will be used to draw it, and\n # also set up the tracking of the actual options.\n line = 0\n option_index = 0\n padding = self.get_component_styles(\"option-list--option\").padding\n for content in self._contents:\n if isinstance(content, Option):\n # The content is an option, so render out the prompt and\n # work out the lines needed to show it.\n new_lines = [\n Line(\n Strip(prompt_line).apply_style(\n Style(meta={\"option\": option_index})\n ),\n option_index,\n )\n for prompt_line in lines_from(\n Padding(content.prompt, padding) if padding else content.prompt,\n options,\n )\n ]\n # Record the span information for the option.\n add_span(OptionLineSpan(line, len(new_lines)))\n option_index += 1\n else:\n # The content isn't an option, so it must be a separator (if\n # there were to be other non-option content for an option\n # list it's in this if/else where we'd process it).\n new_lines = [Line(separator)]\n add_lines(new_lines)\n line += len(new_lines)\n\n # Now that we know how many lines make up the whole content of the\n # list, set the virtual size.\n self.virtual_size = Size(self.scrollable_content_region.width, len(self._lines))\n\n def _duplicate_id_check(self, candidate_items: list[OptionListContent]) -> None:\n \"\"\"Check the items to be added for any duplicates.\n\n Args:\n candidate_items: The items that are going be added.\n\n Raises:\n DuplicateID: If there is an attempt to use a duplicate ID.\n \"\"\"\n # We're only interested in options, and only those that have IDs.\n new_options = [\n item\n for item in candidate_items\n if isinstance(item, Option) and item.id is not None\n ]\n # Get the set of new IDs that we're being given.\n new_option_ids = {option.id for option in new_options}\n # Now check for duplicates, both internally amongst the new items\n # incoming, and also against all the current known IDs.\n if len(new_options) != len(new_option_ids) or not new_option_ids.isdisjoint(\n self._option_ids\n ):\n raise DuplicateID(\"Attempt made to add options with duplicate IDs.\")\n\n def add_options(self, items: Iterable[NewOptionListContent]) -> Self:\n \"\"\"Add new options to the end of the option list.\n\n Args:\n items: The new items to add.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n DuplicateID: If there is an attempt to use a duplicate ID.\n\n Note:\n All options are checked for duplicate IDs *before* any option is\n added. 
A duplicate ID will cause none of the passed items to be\n added to the option list.\n \"\"\"\n # Only work if we have items to add; but don't make a fuss out of\n # zero items to add, just carry on like nothing happened.\n if items:\n # Turn any incoming values into valid content for the list.\n content = [self._make_content(item) for item in items]\n self._duplicate_id_check(content)\n self._contents.extend(content)\n # Pull out the content that is genuine options, create any new\n # ID mappings required, then add the new options to the option\n # list.\n new_options = [item for item in content if isinstance(item, Option)]\n for new_option_index, new_option in enumerate(\n new_options, start=len(self._options)\n ):\n if new_option.id:\n self._option_ids[new_option.id] = new_option_index\n self._options.extend(new_options)\n\n self._refresh_content_tracking(force=True)\n self.refresh()\n return self\n\n def add_option(self, item: NewOptionListContent = None) -> Self:\n \"\"\"Add a new option to the end of the option list.\n\n Args:\n item: The new item to add.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n DuplicateID: If there is an attempt to use a duplicate ID.\n \"\"\"\n return self.add_options([item])\n\n def _remove_option(self, index: int) -> None:\n \"\"\"Remove an option from the option list.\n\n Args:\n index: The index of the item to remove.\n\n Raises:\n IndexError: If there is no option of the given index.\n \"\"\"\n option = self._options[index]\n del self._options[index]\n del self._contents[self._contents.index(option)]\n # Decrement index of options after the one we just removed.\n self._option_ids = {\n option_id: option_index - 1 if option_index > index else option_index\n for option_id, option_index in self._option_ids.items()\n if option_index != index\n }\n self._refresh_content_tracking(force=True)\n # Force a re-validation of the highlight.\n self.highlighted = self.highlighted\n self._mouse_hovering_over = None\n self.refresh()\n\n def remove_option(self, option_id: str) -> Self:\n \"\"\"Remove the option with the given ID.\n\n Args:\n option_id: The ID of the option to remove.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n self._remove_option(self.get_option_index(option_id))\n return self\n\n def remove_option_at_index(self, index: int) -> Self:\n \"\"\"Remove the option at the given index.\n\n Args:\n index: The index of the option to remove.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n try:\n self._remove_option(index)\n except IndexError:\n raise OptionDoesNotExist(\n f\"There is no option with an index of {index!r}\"\n ) from None\n return self\n\n def _replace_option_prompt(self, index: int, prompt: RenderableType) -> None:\n \"\"\"Replace the prompt of an option in the list.\n\n Args:\n index: The index of the option to replace the prompt of.\n prompt: The new prompt for the option.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n self.get_option_at_index(index).set_prompt(prompt)\n self._refresh_content_tracking(force=True)\n self.refresh()\n\n def replace_option_prompt(self, option_id: str, prompt: RenderableType) -> Self:\n \"\"\"Replace the prompt of the option with the given ID.\n\n Args:\n option_id: The ID of the option to replace the prompt of.\n prompt: The new prompt for the option.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n 
OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n self._replace_option_prompt(self.get_option_index(option_id), prompt)\n return self\n\n def replace_option_prompt_at_index(\n self, index: int, prompt: RenderableType\n ) -> Self:\n \"\"\"Replace the prompt of the option at the given index.\n\n Args:\n index: The index of the option to replace the prompt of.\n prompt: The new prompt for the option.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n self._replace_option_prompt(index, prompt)\n return self\n\n def clear_options(self) -> Self:\n \"\"\"Clear the content of the option list.\n\n Returns:\n The `OptionList` instance.\n \"\"\"\n self._contents.clear()\n self._options.clear()\n self._option_ids.clear()\n self.highlighted = None\n self._mouse_hovering_over = None\n self.virtual_size = Size(self.scrollable_content_region.width, 0)\n self._refresh_content_tracking(force=True)\n return self\n\n def _set_option_disabled(self, index: int, disabled: bool) -> Self:\n \"\"\"Set the disabled state of an option in the list.\n\n Args:\n index: The index of the option to set the disabled state of.\n disabled: The disabled state to set.\n\n Returns:\n The `OptionList` instance.\n \"\"\"\n self._options[index].disabled = disabled\n if index == self.highlighted:\n self.highlighted = _widget_navigation.find_next_enabled(\n self._options, anchor=index, direction=1\n )\n # TODO: Refresh only if the affected option is visible.\n self.refresh()\n return self\n\n def enable_option_at_index(self, index: int) -> Self:\n \"\"\"Enable the option at the given index.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n try:\n return self._set_option_disabled(index, False)\n except IndexError:\n raise OptionDoesNotExist(\n f\"There is no option with an index of {index}\"\n ) from None\n\n def disable_option_at_index(self, index: int) -> Self:\n \"\"\"Disable the option at the given index.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n try:\n return self._set_option_disabled(index, True)\n except IndexError:\n raise OptionDoesNotExist(\n f\"There is no option with an index of {index}\"\n ) from None\n\n def enable_option(self, option_id: str) -> Self:\n \"\"\"Enable the option with the given ID.\n\n Args:\n option_id: The ID of the option to enable.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n return self.enable_option_at_index(self.get_option_index(option_id))\n\n def disable_option(self, option_id: str) -> Self:\n \"\"\"Disable the option with the given ID.\n\n Args:\n option_id: The ID of the option to disable.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n return self.disable_option_at_index(self.get_option_index(option_id))\n\n @property\n def option_count(self) -> int:\n \"\"\"The count of options.\"\"\"\n return len(self._options)\n\n def get_option_at_index(self, index: int) -> Option:\n \"\"\"Get the option at the given index.\n\n Args:\n index: The index of the option to get.\n\n Returns:\n The option at that index.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n try:\n return self._options[index]\n except IndexError:\n raise OptionDoesNotExist(\n f\"There is no option 
with an index of {index}\"\n ) from None\n\n def get_option(self, option_id: str) -> Option:\n \"\"\"Get the option with the given ID.\n\n Args:\n option_id: The ID of the option to get.\n\n Returns:\n The option with the ID.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n return self.get_option_at_index(self.get_option_index(option_id))\n\n def get_option_index(self, option_id: str) -> int:\n \"\"\"Get the index of the option with the given ID.\n\n Args:\n option_id: The ID of the option to get the index of.\n\n Returns:\n The index of the item with the given ID.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n try:\n return self._option_ids[option_id]\n except KeyError:\n raise OptionDoesNotExist(\n f\"There is no option with an ID of '{option_id}'\"\n ) from None\n\n def render_line(self, y: int) -> Strip:\n \"\"\"Render a single line in the option list.\n\n Args:\n y: The Y offset of the line to render.\n\n Returns:\n A `Strip` instance for the caller to render.\n \"\"\"\n\n scroll_x, scroll_y = self.scroll_offset\n\n # First off, work out which line we're working on, based off the\n # current scroll offset plus the line we're being asked to render.\n line_number = scroll_y + y\n try:\n line = self._lines[line_number]\n except IndexError:\n # An IndexError means we're drawing in an option list where\n # there's more list than there are options.\n return Strip([])\n\n # Now that we know which line we're on, pull out the option index so\n # we have a \"local\" copy to refer to rather than needing to do a\n # property access multiple times.\n option_index = line.option_index\n\n # Knowing which line we're going to be drawing, we can now go pull\n # the relevant segments for the line of that particular prompt.\n strip = line.segments\n\n # If the line we're looking at isn't associated with an option, it\n # will be a separator, so let's exit early with that.\n if option_index is None:\n return strip.apply_style(\n self.get_component_rich_style(\"option-list--separator\")\n )\n\n # At this point we know we're drawing actual content. 
To allow for\n # horizontal scrolling, let's crop the strip at the right locations.\n strip = strip.crop(scroll_x, scroll_x + self.scrollable_content_region.width)\n\n highlighted = self.highlighted\n mouse_over = self._mouse_hovering_over\n spans = self._spans\n\n # Handle drawing a disabled option.\n if self._options[option_index].disabled:\n return strip.apply_style(\n self.get_component_rich_style(\"option-list--option-disabled\")\n )\n\n # Handle drawing a highlighted option.\n if highlighted is not None and line_number in spans[highlighted]:\n # Highlighted with the mouse over it?\n if option_index == mouse_over:\n return strip.apply_style(\n self.get_component_rich_style(\n \"option-list--option-hover-highlighted\"\n )\n )\n # Just a normal highlight.\n return strip.apply_style(\n self.get_component_rich_style(\"option-list--option-highlighted\")\n )\n\n # Perhaps the line is within an otherwise-uninteresting option that\n # has the mouse hovering over it?\n if mouse_over is not None and line_number in spans[mouse_over]:\n return strip.apply_style(\n self.get_component_rich_style(\"option-list--option-hover\")\n )\n\n # It's a normal option line.\n return strip.apply_style(self.rich_style)\n\n def scroll_to_highlight(self, top: bool = False) -> None:\n \"\"\"Ensure that the highlighted option is in view.\n\n Args:\n top: Scroll highlight to top of the list.\n \"\"\"\n highlighted = self.highlighted\n if highlighted is None:\n return\n try:\n span = self._spans[highlighted]\n except IndexError:\n # Index error means we're being asked to scroll to a highlight\n # before all the tracking information has been worked out.\n # That's fine; let's just NoP that.\n return\n self.scroll_to_region(\n Region(\n 0, span.first, self.scrollable_content_region.width, span.line_count\n ),\n force=True,\n animate=False,\n top=top,\n )\n\n def validate_highlighted(self, highlighted: int | None) -> int | None:\n \"\"\"Validate the `highlighted` property value on access.\"\"\"\n if highlighted is None or not self._options:\n return None\n elif highlighted < 0:\n return 0\n elif highlighted >= len(self._options):\n return len(self._options) - 1\n\n return highlighted\n\n def watch_highlighted(self, highlighted: int | None) -> None:\n \"\"\"React to the highlighted option having changed.\"\"\"\n if highlighted is not None and not self._options[highlighted].disabled:\n self.scroll_to_highlight()\n self.post_message(self.OptionHighlighted(self, highlighted))\n\n def action_cursor_up(self) -> None:\n \"\"\"Move the highlight up to the previous enabled option.\"\"\"\n self.highlighted = _widget_navigation.find_next_enabled(\n self._options,\n anchor=self.highlighted,\n direction=-1,\n )\n\n def action_cursor_down(self) -> None:\n \"\"\"Move the highlight down to the next enabled option.\"\"\"\n self.highlighted = _widget_navigation.find_next_enabled(\n self._options,\n anchor=self.highlighted,\n direction=1,\n )\n\n def action_first(self) -> None:\n \"\"\"Move the highlight to the first enabled option.\"\"\"\n self.highlighted = _widget_navigation.find_first_enabled(self._options)\n\n def action_last(self) -> None:\n \"\"\"Move the highlight to the last enabled option.\"\"\"\n self.highlighted = _widget_navigation.find_last_enabled(self._options)\n\n def _page(self, direction: Direction) -> None:\n \"\"\"Move the highlight roughly by one page in the given direction.\n\n The highlight will tentatively move by exactly one page.\n If this would result in highlighting a disabled option, instead we look for\n an 
enabled option \"further down\" the list of options.\n If there are no such enabled options, we fallback to the \"last\" enabled option.\n (The meaning of \"further down\" and \"last\" depend on the direction specified.)\n\n Args:\n direction: The direction to head, -1 for up and 1 for down.\n \"\"\"\n\n # If we find ourselves in a position where we don't know where we're\n # going, we need a fallback location. Where we go will depend on the\n # direction.\n fallback = self.action_first if direction == -1 else self.action_last\n\n highlighted = self.highlighted\n if highlighted is None:\n # There is no highlight yet so let's go to the default position.\n fallback()\n else:\n # We want to page roughly by lines, but we're dealing with\n # options that can be a varying number of lines in height. So\n # let's start with the target line alone.\n target_line = max(\n 0,\n self._spans[highlighted].first\n + (direction * self.scrollable_content_region.height),\n )\n try:\n # Now that we've got a target line, let's figure out the\n # index of the target option.\n target_option = self._lines[target_line].option_index\n except IndexError:\n # An index error suggests we've gone out of bounds, let's\n # settle on whatever the call thinks is a good place to wrap\n # to.\n fallback()\n else:\n # Looks like we've figured where we'd like to jump to, we\n # just need to make sure we jump to an option that's enabled.\n if target_option is not None:\n target_option = _widget_navigation.find_next_enabled_no_wrap(\n candidates=self._options,\n anchor=target_option,\n direction=direction,\n with_anchor=True,\n )\n # If we couldn't find an enabled option that's at least one page\n # away from the current one, we instead move less than one page\n # to the last enabled option in the correct direction.\n if target_option is None:\n fallback()\n else:\n self.highlighted = target_option\n\n def action_page_up(self) -> None:\n \"\"\"Move the highlight up roughly by one page.\"\"\"\n self._page(-1)\n\n def action_page_down(self) -> None:\n \"\"\"Move the highlight down roughly by one page.\"\"\"\n self._page(1)\n\n def action_select(self) -> None:\n \"\"\"Select the currently-highlighted option.\n\n If no option is selected, then nothing happens. If an option is\n selected, a [OptionList.OptionSelected][textual.widgets.OptionList.OptionSelected]\n message will be posted.\n \"\"\"\n highlighted = self.highlighted\n if highlighted is not None and not self._options[highlighted].disabled:\n self.post_message(self.OptionSelected(self, highlighted))\n", "path": "src/textual/widgets/_option_list.py" } ]
[ { "content": "\"\"\"Provides the core of a classic vertical bounce-bar option list.\n\nUseful as a lightweight list view (not to be confused with ListView, which\nis much richer but uses widgets for the items) and as the base for various\nforms of bounce-bar menu.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import ClassVar, Iterable, NamedTuple\n\nfrom rich.console import RenderableType\nfrom rich.padding import Padding\nfrom rich.repr import Result\nfrom rich.rule import Rule\nfrom rich.style import Style\nfrom typing_extensions import Self, TypeAlias\n\nfrom .. import _widget_navigation\nfrom .._widget_navigation import Direction\nfrom ..binding import Binding, BindingType\nfrom ..events import Click, Idle, Leave, MouseMove\nfrom ..geometry import Region, Size\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..scroll_view import ScrollView\nfrom ..strip import Strip\n\n\nclass DuplicateID(Exception):\n \"\"\"Raised if a duplicate ID is used when adding options to an option list.\"\"\"\n\n\nclass OptionDoesNotExist(Exception):\n \"\"\"Raised when a request has been made for an option that doesn't exist.\"\"\"\n\n\nclass Option:\n \"\"\"Class that holds the details of an individual option.\"\"\"\n\n def __init__(\n self, prompt: RenderableType, id: str | None = None, disabled: bool = False\n ) -> None:\n \"\"\"Initialise the option.\n\n Args:\n prompt: The prompt for the option.\n id: The optional ID for the option.\n disabled: The initial enabled/disabled state. Enabled by default.\n \"\"\"\n self.__prompt = prompt\n self.__id = id\n self.disabled = disabled\n\n @property\n def prompt(self) -> RenderableType:\n \"\"\"The prompt for the option.\"\"\"\n return self.__prompt\n\n def set_prompt(self, prompt: RenderableType) -> None:\n \"\"\"Set the prompt for the option.\n\n Args:\n prompt: The new prompt for the option.\n \"\"\"\n self.__prompt = prompt\n\n @property\n def id(self) -> str | None:\n \"\"\"The optional ID for the option.\"\"\"\n return self.__id\n\n def __rich_repr__(self) -> Result:\n yield \"prompt\", self.prompt\n yield \"id\", self.id, None\n yield \"disabled\", self.disabled, False\n\n\nclass Separator:\n \"\"\"Class used to add a separator to an [OptionList][textual.widgets.OptionList].\"\"\"\n\n\nclass Line(NamedTuple):\n \"\"\"Class that holds a list of segments for the line of a option.\"\"\"\n\n segments: Strip\n \"\"\"The strip of segments that make up the line.\"\"\"\n\n option_index: int | None = None\n \"\"\"The index of the [Option][textual.widgets.option_list.Option] that this line is related to.\n\n If the line isn't related to an option this will be `None`.\n \"\"\"\n\n\nclass OptionLineSpan(NamedTuple):\n \"\"\"Class that holds the line span information for an option.\n\n An [Option][textual.widgets.option_list.Option] can have a prompt that\n spans multiple lines. Also, there's no requirement that every option in\n an option list has the same span information. So this structure is used\n to track the line that an option starts on, and how many lines it\n contains.\n \"\"\"\n\n first: int\n \"\"\"The line position for the start of the option..\"\"\"\n line_count: int\n \"\"\"The count of lines that make up the option.\"\"\"\n\n def __contains__(self, line: object) -> bool:\n # For this named tuple `in` will have a very specific meaning; but\n # to keep mypy and friends happy we need to accept an object as the\n # parameter. 
So, let's keep the type checkers happy but only accept\n # an int.\n assert isinstance(line, int)\n return line >= self.first and line < (self.first + self.line_count)\n\n\nOptionListContent: TypeAlias = \"Option | Separator\"\n\"\"\"The type of an item of content in the option list.\n\nThis type represents all of the types that will be found in the list of\ncontent of the option list after it has been processed for addition.\n\"\"\"\n\nNewOptionListContent: TypeAlias = \"OptionListContent | None | RenderableType\"\n\"\"\"The type of a new item of option list content to be added to an option list.\n\nThis type represents all of the types that will be accepted when adding new\ncontent to the option list. This is a superset of [`OptionListContent`][textual.types.OptionListContent].\n\"\"\"\n\n\nclass OptionList(ScrollView, can_focus=True):\n \"\"\"A vertical option list with bounce-bar highlighting.\"\"\"\n\n BINDINGS: ClassVar[list[BindingType]] = [\n Binding(\"down\", \"cursor_down\", \"Down\", show=False),\n Binding(\"end\", \"last\", \"Last\", show=False),\n Binding(\"enter\", \"select\", \"Select\", show=False),\n Binding(\"home\", \"first\", \"First\", show=False),\n Binding(\"pagedown\", \"page_down\", \"Page Down\", show=False),\n Binding(\"pageup\", \"page_up\", \"Page Up\", show=False),\n Binding(\"up\", \"cursor_up\", \"Up\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | down | Move the highlight down. |\n | end | Move the highlight to the last option. |\n | enter | Select the current option. |\n | home | Move the highlight to the first option. |\n | pagedown | Move the highlight down a page of options. |\n | pageup | Move the highlight up a page of options. |\n | up | Move the highlight up. |\n \"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\n \"option-list--option\",\n \"option-list--option-disabled\",\n \"option-list--option-highlighted\",\n \"option-list--option-hover\",\n \"option-list--option-hover-highlighted\",\n \"option-list--separator\",\n }\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `option-list--option-disabled` | Target disabled options. |\n | `option-list--option-highlighted` | Target the highlighted option. |\n | `option-list--option-hover` | Target an option that has the mouse over it. |\n | `option-list--option-hover-highlighted` | Target a highlighted option that has the mouse over it. |\n | `option-list--separator` | Target the separators. 
|\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n OptionList {\n height: auto;\n max-height: 100%;\n background: $boost;\n color: $text;\n overflow-x: hidden;\n border: tall transparent;\n padding: 0 1;\n }\n\n OptionList:focus {\n border: tall $accent;\n\n }\n\n OptionList > .option-list--separator {\n color: $foreground 15%;\n }\n\n OptionList > .option-list--option-highlighted {\n color: $text;\n text-style: bold;\n }\n\n OptionList:focus > .option-list--option-highlighted {\n background: $accent;\n }\n\n OptionList > .option-list--option-disabled {\n color: $text-disabled;\n }\n\n OptionList > .option-list--option-hover {\n background: $boost;\n }\n\n OptionList > .option-list--option-hover-highlighted {\n background: $accent 60%;\n color: $text;\n text-style: bold;\n }\n\n OptionList:focus > .option-list--option-hover-highlighted {\n background: $accent;\n color: $text;\n text-style: bold;\n }\n \"\"\"\n\n highlighted: reactive[int | None] = reactive[\"int | None\"](None)\n \"\"\"The index of the currently-highlighted option, or `None` if no option is highlighted.\"\"\"\n\n class OptionMessage(Message):\n \"\"\"Base class for all option messages.\"\"\"\n\n def __init__(self, option_list: OptionList, index: int) -> None:\n \"\"\"Initialise the option message.\n\n Args:\n option_list: The option list that owns the option.\n index: The index of the option that the message relates to.\n \"\"\"\n super().__init__()\n self.option_list: OptionList = option_list\n \"\"\"The option list that sent the message.\"\"\"\n self.option: Option = option_list.get_option_at_index(index)\n \"\"\"The highlighted option.\"\"\"\n self.option_id: str | None = self.option.id\n \"\"\"The ID of the option that the message relates to.\"\"\"\n self.option_index: int = index\n \"\"\"The index of the option that the message relates to.\"\"\"\n\n @property\n def control(self) -> OptionList:\n \"\"\"The option list that sent the message.\n\n This is an alias for [`OptionMessage.option_list`][textual.widgets.OptionList.OptionMessage.option_list]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.option_list\n\n def __rich_repr__(self) -> Result:\n yield \"option_list\", self.option_list\n yield \"option\", self.option\n yield \"option_id\", self.option_id\n yield \"option_index\", self.option_index\n\n class OptionHighlighted(OptionMessage):\n \"\"\"Message sent when an option is highlighted.\n\n Can be handled using `on_option_list_option_highlighted` in a subclass of\n `OptionList` or in a parent node in the DOM.\n \"\"\"\n\n class OptionSelected(OptionMessage):\n \"\"\"Message sent when an option is selected.\n\n Can be handled using `on_option_list_option_selected` in a subclass of\n `OptionList` or in a parent node in the DOM.\n \"\"\"\n\n def __init__(\n self,\n *content: NewOptionListContent,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n wrap: bool = True,\n ):\n \"\"\"Initialise the option list.\n\n Args:\n *content: The content for the option list.\n name: The name of the option list.\n id: The ID of the option list in the DOM.\n classes: The CSS classes of the option list.\n disabled: Whether the option list is disabled or not.\n wrap: Should prompts be auto-wrapped?\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n\n # Internal refresh trackers. 
For things driven from on_idle.\n self._needs_refresh_content_tracking = False\n self._needs_to_scroll_to_highlight = False\n\n self._wrap = wrap\n \"\"\"Should we auto-wrap options?\n\n If `False` options wider than the list will be truncated.\n \"\"\"\n\n self._contents: list[OptionListContent] = [\n self._make_content(item) for item in content\n ]\n \"\"\"A list of the content of the option list.\n\n This is *every* item that makes up the content of the option list;\n this includes both the options *and* the separators (and any other\n decoration we could end up adding -- although I don't anticipate\n anything else at the moment; but padding around separators could be\n a thing, perhaps).\n \"\"\"\n\n self._options: list[Option] = [\n content for content in self._contents if isinstance(content, Option)\n ]\n \"\"\"A list of the options within the option list.\n\n This is a list of references to just the options alone, ignoring the\n separators and potentially any other line-oriented option list\n content that isn't an option.\n \"\"\"\n\n self._option_ids: dict[str, int] = {\n option.id: index for index, option in enumerate(self._options) if option.id\n }\n \"\"\"A dictionary of option IDs and the option indexes they relate to.\"\"\"\n\n self._lines: list[Line] = []\n \"\"\"A list of all of the individual lines that make up the option list.\n\n Note that the size of this list will be at least the same as the number\n of options, and actually greater if any prompt of any option is\n multiple lines.\n \"\"\"\n\n self._spans: list[OptionLineSpan] = []\n \"\"\"A list of the locations and sizes of all options in the option list.\n\n This will be the same size as the number of prompts; each entry in\n the list contains the line offset of the start of the prompt, and\n the count of the lines in the prompt.\n \"\"\"\n\n # Initial calculation of the content tracking.\n self._request_content_tracking_refresh()\n\n self._mouse_hovering_over: int | None = None\n \"\"\"Used to track what the mouse is hovering over.\"\"\"\n\n # Finally, cause the highlighted property to settle down based on\n # the state of the option list in regard to its available options.\n self.action_first()\n\n def _request_content_tracking_refresh(\n self, rescroll_to_highlight: bool = False\n ) -> None:\n \"\"\"Request that the content tracking information gets refreshed.\n\n Args:\n rescroll_to_highlight: Should the widget ensure the highlight is visible?\n\n Calling this method sets a flag to say the refresh should happen,\n and books the refresh call in for the next idle moment.\n \"\"\"\n self._needs_refresh_content_tracking = True\n self._needs_to_scroll_to_highlight = rescroll_to_highlight\n self.check_idle()\n\n async def _on_idle(self, _: Idle) -> None:\n \"\"\"Perform content tracking data refresh when idle.\"\"\"\n self._refresh_content_tracking()\n if self._needs_to_scroll_to_highlight:\n self._needs_to_scroll_to_highlight = False\n self.scroll_to_highlight()\n\n def watch_show_vertical_scrollbar(self) -> None:\n \"\"\"Handle the vertical scrollbar visibility status changing.\n\n `show_vertical_scrollbar` is watched because it has an impact on the\n available width in which to render the renderables that make up the\n options in the list. 
If a vertical scrollbar appears or disappears\n we need to recalculate all the lines that make up the list.\n \"\"\"\n self._request_content_tracking_refresh()\n\n def _on_resize(self) -> None:\n \"\"\"Refresh the layout of the renderables in the list when resized.\"\"\"\n self._request_content_tracking_refresh(rescroll_to_highlight=True)\n\n def _on_mouse_move(self, event: MouseMove) -> None:\n \"\"\"React to the mouse moving.\n\n Args:\n event: The mouse movement event.\n \"\"\"\n self._mouse_hovering_over = event.style.meta.get(\"option\")\n\n def _on_leave(self, _: Leave) -> None:\n \"\"\"React to the mouse leaving the widget.\"\"\"\n self._mouse_hovering_over = None\n\n async def _on_click(self, event: Click) -> None:\n \"\"\"React to the mouse being clicked on an item.\n\n Args:\n event: The click event.\n \"\"\"\n clicked_option: int | None = event.style.meta.get(\"option\")\n if clicked_option is not None and not self._options[clicked_option].disabled:\n self.highlighted = clicked_option\n self.action_select()\n\n def _make_content(self, content: NewOptionListContent) -> OptionListContent:\n \"\"\"Convert a single item of content for the list into a content type.\n\n Args:\n content: The content to turn into a full option list type.\n\n Returns:\n The content, usable in the option list.\n \"\"\"\n if isinstance(content, (Option, Separator)):\n return content\n if content is None:\n return Separator()\n return Option(content)\n\n def _clear_content_tracking(self) -> None:\n \"\"\"Clear down the content tracking information.\"\"\"\n self._lines.clear()\n self._spans.clear()\n\n def _left_gutter_width(self) -> int:\n \"\"\"Returns the size of any left gutter that should be taken into account.\n\n Returns:\n The width of the left gutter.\n \"\"\"\n return 0\n\n def _refresh_content_tracking(self, force: bool = False) -> None:\n \"\"\"Refresh the various forms of option list content tracking.\n\n Args:\n force: Optionally force the refresh.\n\n Raises:\n DuplicateID: If there is an attempt to use a duplicate ID.\n\n Without a `force` the refresh will only take place if it has been\n requested via `_refresh_content_tracking`.\n \"\"\"\n\n # If we don't need to refresh, don't bother.\n if not self._needs_refresh_content_tracking and not force:\n return\n\n # If we don't know our own width yet, we can't sensibly work out the\n # heights of the prompts of the options yet, so let's shortcut that\n # work. 
We'll be back here once we know our height.\n if not self.size.width:\n return\n\n self._clear_content_tracking()\n self._needs_refresh_content_tracking = False\n\n # Set up for doing less property access work inside the loop.\n lines_from = self.app.console.render_lines\n add_span = self._spans.append\n add_lines = self._lines.extend\n\n # Adjust the options for our purposes.\n options = self.app.console.options.update_width(\n self.scrollable_content_region.width - self._left_gutter_width()\n )\n options.no_wrap = not self._wrap\n if not self._wrap:\n options.overflow = \"ellipsis\"\n\n # Create a rule that can be used as a separator.\n separator = Strip(lines_from(Rule(style=\"\"))[0])\n\n # Work through each item that makes up the content of the list,\n # break out the individual lines that will be used to draw it, and\n # also set up the tracking of the actual options.\n line = 0\n option_index = 0\n padding = self.get_component_styles(\"option-list--option\").padding\n for content in self._contents:\n if isinstance(content, Option):\n # The content is an option, so render out the prompt and\n # work out the lines needed to show it.\n new_lines = [\n Line(\n Strip(prompt_line).apply_style(\n Style(meta={\"option\": option_index})\n ),\n option_index,\n )\n for prompt_line in lines_from(\n Padding(content.prompt, padding) if padding else content.prompt,\n options,\n )\n ]\n # Record the span information for the option.\n add_span(OptionLineSpan(line, len(new_lines)))\n option_index += 1\n else:\n # The content isn't an option, so it must be a separator (if\n # there were to be other non-option content for an option\n # list it's in this if/else where we'd process it).\n new_lines = [Line(separator)]\n add_lines(new_lines)\n line += len(new_lines)\n\n # Now that we know how many lines make up the whole content of the\n # list, set the virtual size.\n self.virtual_size = Size(self.scrollable_content_region.width, len(self._lines))\n\n def _duplicate_id_check(self, candidate_items: list[OptionListContent]) -> None:\n \"\"\"Check the items to be added for any duplicates.\n\n Args:\n candidate_items: The items that are going be added.\n\n Raises:\n DuplicateID: If there is an attempt to use a duplicate ID.\n \"\"\"\n # We're only interested in options, and only those that have IDs.\n new_options = [\n item\n for item in candidate_items\n if isinstance(item, Option) and item.id is not None\n ]\n # Get the set of new IDs that we're being given.\n new_option_ids = {option.id for option in new_options}\n # Now check for duplicates, both internally amongst the new items\n # incoming, and also against all the current known IDs.\n if len(new_options) != len(new_option_ids) or not new_option_ids.isdisjoint(\n self._option_ids\n ):\n raise DuplicateID(\"Attempt made to add options with duplicate IDs.\")\n\n def add_options(self, items: Iterable[NewOptionListContent]) -> Self:\n \"\"\"Add new options to the end of the option list.\n\n Args:\n items: The new items to add.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n DuplicateID: If there is an attempt to use a duplicate ID.\n\n Note:\n All options are checked for duplicate IDs *before* any option is\n added. 
A duplicate ID will cause none of the passed items to be\n added to the option list.\n \"\"\"\n # Only work if we have items to add; but don't make a fuss out of\n # zero items to add, just carry on like nothing happened.\n if items:\n # Turn any incoming values into valid content for the list.\n content = [self._make_content(item) for item in items]\n self._duplicate_id_check(content)\n self._contents.extend(content)\n # Pull out the content that is genuine options, create any new\n # ID mappings required, then add the new options to the option\n # list.\n new_options = [item for item in content if isinstance(item, Option)]\n for new_option_index, new_option in enumerate(\n new_options, start=len(self._options)\n ):\n if new_option.id:\n self._option_ids[new_option.id] = new_option_index\n self._options.extend(new_options)\n\n self._refresh_content_tracking(force=True)\n self.refresh()\n return self\n\n def add_option(self, item: NewOptionListContent = None) -> Self:\n \"\"\"Add a new option to the end of the option list.\n\n Args:\n item: The new item to add.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n DuplicateID: If there is an attempt to use a duplicate ID.\n \"\"\"\n return self.add_options([item])\n\n def _remove_option(self, index: int) -> None:\n \"\"\"Remove an option from the option list.\n\n Args:\n index: The index of the item to remove.\n\n Raises:\n IndexError: If there is no option of the given index.\n \"\"\"\n option = self._options[index]\n del self._options[index]\n del self._contents[self._contents.index(option)]\n # Decrement index of options after the one we just removed.\n self._option_ids = {\n option_id: option_index - 1 if option_index > index else option_index\n for option_id, option_index in self._option_ids.items()\n if option_index != index\n }\n self._refresh_content_tracking(force=True)\n # Force a re-validation of the highlight.\n self.highlighted = self.highlighted\n self._mouse_hovering_over = None\n self.refresh()\n\n def remove_option(self, option_id: str) -> Self:\n \"\"\"Remove the option with the given ID.\n\n Args:\n option_id: The ID of the option to remove.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n self._remove_option(self.get_option_index(option_id))\n return self\n\n def remove_option_at_index(self, index: int) -> Self:\n \"\"\"Remove the option at the given index.\n\n Args:\n index: The index of the option to remove.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n try:\n self._remove_option(index)\n except IndexError:\n raise OptionDoesNotExist(\n f\"There is no option with an index of {index!r}\"\n ) from None\n return self\n\n def _replace_option_prompt(self, index: int, prompt: RenderableType) -> None:\n \"\"\"Replace the prompt of an option in the list.\n\n Args:\n index: The index of the option to replace the prompt of.\n prompt: The new prompt for the option.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n self.get_option_at_index(index).set_prompt(prompt)\n self._refresh_content_tracking(force=True)\n self.refresh()\n\n def replace_option_prompt(self, option_id: str, prompt: RenderableType) -> Self:\n \"\"\"Replace the prompt of the option with the given ID.\n\n Args:\n option_id: The ID of the option to replace the prompt of.\n prompt: The new prompt for the option.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n 
OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n self._replace_option_prompt(self.get_option_index(option_id), prompt)\n return self\n\n def replace_option_prompt_at_index(\n self, index: int, prompt: RenderableType\n ) -> Self:\n \"\"\"Replace the prompt of the option at the given index.\n\n Args:\n index: The index of the option to replace the prompt of.\n prompt: The new prompt for the option.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n self._replace_option_prompt(index, prompt)\n return self\n\n def clear_options(self) -> Self:\n \"\"\"Clear the content of the option list.\n\n Returns:\n The `OptionList` instance.\n \"\"\"\n self._contents.clear()\n self._options.clear()\n self._option_ids.clear()\n self.highlighted = None\n self._mouse_hovering_over = None\n self.virtual_size = Size(self.scrollable_content_region.width, 0)\n self._refresh_content_tracking(force=True)\n return self\n\n def _set_option_disabled(self, index: int, disabled: bool) -> Self:\n \"\"\"Set the disabled state of an option in the list.\n\n Args:\n index: The index of the option to set the disabled state of.\n disabled: The disabled state to set.\n\n Returns:\n The `OptionList` instance.\n \"\"\"\n self._options[index].disabled = disabled\n if index == self.highlighted:\n self.highlighted = _widget_navigation.find_next_enabled(\n self._options, anchor=index, direction=1\n )\n # TODO: Refresh only if the affected option is visible.\n self.refresh()\n return self\n\n def enable_option_at_index(self, index: int) -> Self:\n \"\"\"Enable the option at the given index.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n try:\n return self._set_option_disabled(index, False)\n except IndexError:\n raise OptionDoesNotExist(\n f\"There is no option with an index of {index}\"\n ) from None\n\n def disable_option_at_index(self, index: int) -> Self:\n \"\"\"Disable the option at the given index.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n try:\n return self._set_option_disabled(index, True)\n except IndexError:\n raise OptionDoesNotExist(\n f\"There is no option with an index of {index}\"\n ) from None\n\n def enable_option(self, option_id: str) -> Self:\n \"\"\"Enable the option with the given ID.\n\n Args:\n option_id: The ID of the option to enable.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n return self.enable_option_at_index(self.get_option_index(option_id))\n\n def disable_option(self, option_id: str) -> Self:\n \"\"\"Disable the option with the given ID.\n\n Args:\n option_id: The ID of the option to disable.\n\n Returns:\n The `OptionList` instance.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n return self.disable_option_at_index(self.get_option_index(option_id))\n\n @property\n def option_count(self) -> int:\n \"\"\"The count of options.\"\"\"\n return len(self._options)\n\n def get_option_at_index(self, index: int) -> Option:\n \"\"\"Get the option at the given index.\n\n Args:\n index: The index of the option to get.\n\n Returns:\n The option at that index.\n\n Raises:\n OptionDoesNotExist: If there is no option with the given index.\n \"\"\"\n try:\n return self._options[index]\n except IndexError:\n raise OptionDoesNotExist(\n f\"There is no option 
with an index of {index}\"\n ) from None\n\n def get_option(self, option_id: str) -> Option:\n \"\"\"Get the option with the given ID.\n\n Args:\n option_id: The ID of the option to get.\n\n Returns:\n The option with the ID.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n return self.get_option_at_index(self.get_option_index(option_id))\n\n def get_option_index(self, option_id: str) -> int:\n \"\"\"Get the index of the option with the given ID.\n\n Args:\n option_id: The ID of the option to get the index of.\n\n Returns:\n The index of the item with the given ID.\n\n Raises:\n OptionDoesNotExist: If no option has the given ID.\n \"\"\"\n try:\n return self._option_ids[option_id]\n except KeyError:\n raise OptionDoesNotExist(\n f\"There is no option with an ID of '{option_id}'\"\n ) from None\n\n def render_line(self, y: int) -> Strip:\n \"\"\"Render a single line in the option list.\n\n Args:\n y: The Y offset of the line to render.\n\n Returns:\n A `Strip` instance for the caller to render.\n \"\"\"\n\n scroll_x, scroll_y = self.scroll_offset\n\n # First off, work out which line we're working on, based off the\n # current scroll offset plus the line we're being asked to render.\n line_number = scroll_y + y\n try:\n line = self._lines[line_number]\n except IndexError:\n # An IndexError means we're drawing in an option list where\n # there's more list than there are options.\n return Strip([])\n\n # Now that we know which line we're on, pull out the option index so\n # we have a \"local\" copy to refer to rather than needing to do a\n # property access multiple times.\n option_index = line.option_index\n\n # Knowing which line we're going to be drawing, we can now go pull\n # the relevant segments for the line of that particular prompt.\n strip = line.segments\n\n # If the line we're looking at isn't associated with an option, it\n # will be a separator, so let's exit early with that.\n if option_index is None:\n return strip.apply_style(\n self.get_component_rich_style(\"option-list--separator\")\n )\n\n # At this point we know we're drawing actual content. 
To allow for\n # horizontal scrolling, let's crop the strip at the right locations.\n strip = strip.crop(scroll_x, scroll_x + self.scrollable_content_region.width)\n\n highlighted = self.highlighted\n mouse_over = self._mouse_hovering_over\n spans = self._spans\n\n # Handle drawing a disabled option.\n if self._options[option_index].disabled:\n return strip.apply_style(\n self.get_component_rich_style(\"option-list--option-disabled\")\n )\n\n # Handle drawing a highlighted option.\n if highlighted is not None and line_number in spans[highlighted]:\n # Highlighted with the mouse over it?\n if option_index == mouse_over:\n return strip.apply_style(\n self.get_component_rich_style(\n \"option-list--option-hover-highlighted\"\n )\n )\n # Just a normal highlight.\n return strip.apply_style(\n self.get_component_rich_style(\"option-list--option-highlighted\")\n )\n\n # Perhaps the line is within an otherwise-uninteresting option that\n # has the mouse hovering over it?\n if mouse_over is not None and line_number in spans[mouse_over]:\n return strip.apply_style(\n self.get_component_rich_style(\"option-list--option-hover\")\n )\n\n # It's a normal option line.\n return strip.apply_style(self.rich_style)\n\n def scroll_to_highlight(self, top: bool = False) -> None:\n \"\"\"Ensure that the highlighted option is in view.\n\n Args:\n top: Scroll highlight to top of the list.\n \"\"\"\n highlighted = self.highlighted\n if highlighted is None:\n return\n try:\n span = self._spans[highlighted]\n except IndexError:\n # Index error means we're being asked to scroll to a highlight\n # before all the tracking information has been worked out.\n # That's fine; let's just NoP that.\n return\n self.scroll_to_region(\n Region(\n 0, span.first, self.scrollable_content_region.width, span.line_count\n ),\n force=True,\n animate=False,\n top=top,\n )\n\n def validate_highlighted(self, highlighted: int | None) -> int | None:\n \"\"\"Validate the `highlighted` property value on access.\"\"\"\n if highlighted is None or not self._options:\n return None\n elif highlighted < 0:\n return 0\n elif highlighted >= len(self._options):\n return len(self._options) - 1\n\n return highlighted\n\n def watch_highlighted(self, highlighted: int | None) -> None:\n \"\"\"React to the highlighted option having changed.\"\"\"\n if highlighted is not None and not self._options[highlighted].disabled:\n self.scroll_to_highlight()\n self.post_message(self.OptionHighlighted(self, highlighted))\n\n def action_cursor_up(self) -> None:\n \"\"\"Move the highlight up to the previous enabled option.\"\"\"\n self.highlighted = _widget_navigation.find_next_enabled(\n self._options,\n anchor=self.highlighted,\n direction=-1,\n )\n\n def action_cursor_down(self) -> None:\n \"\"\"Move the highlight down to the next enabled option.\"\"\"\n self.highlighted = _widget_navigation.find_next_enabled(\n self._options,\n anchor=self.highlighted,\n direction=1,\n )\n\n def action_first(self) -> None:\n \"\"\"Move the highlight to the first enabled option.\"\"\"\n self.highlighted = _widget_navigation.find_first_enabled(self._options)\n\n def action_last(self) -> None:\n \"\"\"Move the highlight to the last enabled option.\"\"\"\n self.highlighted = _widget_navigation.find_last_enabled(self._options)\n\n def _page(self, direction: Direction) -> None:\n \"\"\"Move the highlight roughly by one page in the given direction.\n\n The highlight will tentatively move by exactly one page.\n If this would result in highlighting a disabled option, instead we look for\n an 
enabled option \"further down\" the list of options.\n If there are no such enabled options, we fallback to the \"last\" enabled option.\n (The meaning of \"further down\" and \"last\" depend on the direction specified.)\n\n Args:\n direction: The direction to head, -1 for up and 1 for down.\n \"\"\"\n\n # If we find ourselves in a position where we don't know where we're\n # going, we need a fallback location. Where we go will depend on the\n # direction.\n fallback = self.action_first if direction == -1 else self.action_last\n\n highlighted = self.highlighted\n if highlighted is None:\n # There is no highlight yet so let's go to the default position.\n fallback()\n else:\n # We want to page roughly by lines, but we're dealing with\n # options that can be a varying number of lines in height. So\n # let's start with the target line alone.\n target_line = max(\n 0,\n self._spans[highlighted].first\n + (direction * self.scrollable_content_region.height),\n )\n try:\n # Now that we've got a target line, let's figure out the\n # index of the target option.\n target_option = self._lines[target_line].option_index\n except IndexError:\n # An index error suggests we've gone out of bounds, let's\n # settle on whatever the call thinks is a good place to wrap\n # to.\n fallback()\n else:\n # Looks like we've figured where we'd like to jump to, we\n # just need to make sure we jump to an option that's enabled.\n if target_option is not None:\n target_option = _widget_navigation.find_next_enabled_no_wrap(\n candidates=self._options,\n anchor=target_option,\n direction=direction,\n with_anchor=True,\n )\n # If we couldn't find an enabled option that's at least one page\n # away from the current one, we instead move less than one page\n # to the last enabled option in the correct direction.\n if target_option is None:\n fallback()\n else:\n self.highlighted = target_option\n\n def action_page_up(self) -> None:\n \"\"\"Move the highlight up roughly by one page.\"\"\"\n self._page(-1)\n\n def action_page_down(self) -> None:\n \"\"\"Move the highlight down roughly by one page.\"\"\"\n self._page(1)\n\n def action_select(self) -> None:\n \"\"\"Select the currently-highlighted option.\n\n If no option is selected, then nothing happens. If an option is\n selected, a [OptionList.OptionSelected][textual.widgets.OptionList.OptionSelected]\n message will be posted.\n \"\"\"\n highlighted = self.highlighted\n if highlighted is not None and not self._options[highlighted].disabled:\n self.post_message(self.OptionSelected(self, highlighted))\n", "path": "src/textual/widgets/_option_list.py" } ]
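For reference, here is a minimal usage sketch of the `OptionList` API defined in the file above (the app name, option labels, and IDs are hypothetical and not taken from the Textual sources):

from textual.app import App, ComposeResult
from textual.widgets import OptionList
from textual.widgets.option_list import Option


class PickerApp(App[None]):
    """Hypothetical app exercising the OptionList API defined above."""

    def compose(self) -> ComposeResult:
        # Content may be Option instances, plain renderables, or None (a separator).
        yield OptionList(
            Option("Apples", id="apples"),
            Option("Broccoli", id="broccoli", disabled=True),
            None,  # rendered as a separator rule
            "Cherries",  # a plain string becomes an Option with no ID
        )

    def on_mount(self) -> None:
        option_list = self.query_one(OptionList)
        # add_options checks all new IDs for duplicates *before* adding any of them.
        option_list.add_options([Option(f"Extra #{n}", id=f"extra-{n}") for n in range(3)])

    def on_option_list_option_selected(
        self, event: OptionList.OptionSelected
    ) -> None:
        # OptionSelected carries option, option_id and option_index for the chosen item.
        self.exit()


if __name__ == "__main__":
    PickerApp().run()

Note that selection is reported via a posted OptionSelected message rather than a return value, which is why the handler above is a message handler.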
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d63a1862df..8d6df22556 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 
 ### Fixed
 
+- Fixed scrolling in long `OptionList` by adding max height of 100% https://github.com/Textualize/textual/issues/4021
 - Fixed `DirectoryTree.clear_node` not clearing the node specified https://github.com/Textualize/textual/issues/4122
 
 ### Changed
@@ -29,6 +30,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 - Added Worker.cancelled_event https://github.com/Textualize/textual/pull/4075
 - `Tree` (and `DirectoryTree`) grew an attribute `lock` that can be used for synchronization across coroutines https://github.com/Textualize/textual/issues/4056
 
+
 ## [0.48.2] - 2024-02-02
 
 ### Fixed
diff --git a/src/textual/widgets/_option_list.py b/src/textual/widgets/_option_list.py
index 888140bbee..c59320afda 100644
--- a/src/textual/widgets/_option_list.py
+++ b/src/textual/widgets/_option_list.py
@@ -177,6 +177,7 @@ class OptionList(ScrollView, can_focus=True):
     DEFAULT_CSS = """
     OptionList {
         height: auto;
+        max-height: 100%;
         background: $boost;
         color: $text;
         overflow-x: hidden;
diff --git a/tests/snapshot_tests/__snapshots__/test_snapshots.ambr b/tests/snapshot_tests/__snapshots__/test_snapshots.ambr
index 98e31a0c51..719a153644 100644
--- a/tests/snapshot_tests/__snapshots__/test_snapshots.ambr
+++ b/tests/snapshot_tests/__snapshots__/test_snapshots.ambr
@@ -25674,6 +25674,165 @@
   '''
 # ---
+# name: test_option_list_scrolling_in_long_list
+  '''
+  [about 160 lines of auto-generated Rich terminal SVG omitted: the snapshot renders the LongOptionListApp screen scrolled to the end of the list, with options #78 to #99 visible, option #99 highlighted, and the scrollbar thumb near the bottom of the track]
+  '''
+# ---
 # name: test_option_list_strings
   '''
   <svg class="rich-terminal" viewBox="0 0 994 635.5999999999999" xmlns="http://www.w3.org/2000/svg">
diff --git a/tests/snapshot_tests/snapshot_apps/option_list_long.py b/tests/snapshot_tests/snapshot_apps/option_list_long.py
new file mode 100644
index 0000000000..7971defa10
--- /dev/null
+++ b/tests/snapshot_tests/snapshot_apps/option_list_long.py
@@ -0,0 +1,12 @@
+from textual.app import App, ComposeResult
+from textual.widgets import OptionList
+from textual.widgets.option_list import Option
+
+
+class LongOptionListApp(App[None]):
+    def compose(self) -> ComposeResult:
+        yield OptionList(*[Option(f"This is option #{n}") for n in range(100)])
+
+
+if __name__ == "__main__":
+    LongOptionListApp().run()
diff --git a/tests/snapshot_tests/test_snapshots.py b/tests/snapshot_tests/test_snapshots.py
index b93d5e90d7..0d7c92bee1 100644
--- a/tests/snapshot_tests/test_snapshots.py
+++ b/tests/snapshot_tests/test_snapshots.py
@@ -296,6 +296,10 @@ def test_option_list_replace_prompt_from_two_lines_to_three_lines(snap_compare):
     )
 
 
+def test_option_list_scrolling_in_long_list(snap_compare):
+    assert snap_compare(SNAPSHOT_APPS_DIR / "option_list_long.py", press=["up"])
+
+
 def test_progress_bar_indeterminate(snap_compare):
     assert snap_compare(WIDGET_EXAMPLES_DIR / "progress_bar_isolated_.py", press=["f"])
PyGithub__PyGithub-797
stack overflow for Team.description

A missing `_` makes the property accessor call itself indefinitely, producing a stack overflow. It should be `self._description.value`, I suspect. Due to client open source policy I cannot submit a PR.

Team.py
```python
@property
def description(self):
    """
    :type: string
    """
    self._completeIfNotSet(self._description)
    return self.description.value
```
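For reference, the patched accessor in the fixed file below simply reads the private attribute instead of re-entering the property; a minimal sketch of the repaired method:

```python
@property
def description(self):
    """
    :type: string
    """
    self._completeIfNotSet(self._description)
    # Read the private attribute; `self.description.value` would re-enter
    # this property and recurse until the interpreter raises RecursionError.
    return self._description.value
```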
[ { "content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 martinqt <[email protected]> #\n# Copyright 2014 Jan Orel <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Aron Culotta <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2016 mattjmorrison <[email protected]> #\n# Copyright 2018 Isuru Fernando <[email protected]> #\n# Copyright 2018 James D'Amato <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport github.GithubObject\nimport github.PaginatedList\n\nimport github.Repository\nimport github.NamedUser\nimport github.Organization\n\n\nclass Team(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents Teams. 
The reference can be found here http://developer.github.com/v3/orgs/teams/\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"name\": self._name.value})\n\n @property\n def id(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def members_count(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._members_count)\n return self._members_count.value\n\n @property\n def members_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._members_url)\n return self._members_url.value\n\n @property\n def name(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._name)\n return self._name.value\n\n @property\n def description(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._description)\n return self.description.value\n\n @property\n def permission(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._permission)\n return self._permission.value\n\n @property\n def repos_count(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._repos_count)\n return self._repos_count.value\n\n @property\n def repositories_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._repositories_url)\n return self._repositories_url.value\n\n @property\n def slug(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._slug)\n return self._slug.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def organization(self):\n \"\"\"\n :type: :class:`github.Organization.Organization`\n \"\"\"\n self._completeIfNotSet(self._organization)\n return self._organization.value\n\n @property\n def privacy(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._privacy)\n return self._privacy.value\n\n def add_to_members(self, member):\n \"\"\"\n :calls: `PUT /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_\n :param member: :class:`github.NamedUser.NamedUser`\n :rtype: None\n \"\"\"\n assert isinstance(member, github.NamedUser.NamedUser), member\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/members/\" + member._identity\n )\n\n def add_membership(self, member, role=github.GithubObject.NotSet):\n \"\"\"\n :calls: `PUT /teams/:id/memberships/:user <http://developer.github.com/v3/orgs/teams>`_\n :param member: :class:`github.Nameduser.NamedUser`\n :param role: string\n :rtype: None\n \"\"\"\n assert isinstance(member, github.NamedUser.NamedUser), member\n assert role is github.GithubObject.NotSet or isinstance(\n role, (str, unicode)), role\n if role is not github.GithubObject.NotSet:\n assert role in ['member', 'maintainer']\n put_parameters = {\n \"role\": role,\n }\n else:\n put_parameters = {\n \"role\": \"member\",\n }\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/memberships/\" + member._identity,\n input=put_parameters\n )\n\n def add_to_repos(self, repo):\n \"\"\"\n :calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_\n :param repo: :class:`github.Repository.Repository`\n :rtype: None\n \"\"\"\n assert isinstance(repo, github.Repository.Repository), repo\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/repos/\" + repo._identity\n )\n\n def set_repo_permission(self, repo, permission):\n \"\"\"\n :calls: `PUT 
/teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_\n :param repo: :class:`github.Repository.Repository`\n :param permission: string\n :rtype: None\n \"\"\"\n assert isinstance(repo, github.Repository.Repository), repo\n put_parameters = {\n \"permission\": permission,\n }\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/repos/\" + repo._identity,\n input=put_parameters\n )\n\n def delete(self):\n \"\"\"\n :calls: `DELETE /teams/:id <http://developer.github.com/v3/orgs/teams>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url\n )\n\n def edit(self, name, permission=github.GithubObject.NotSet, privacy=github.GithubObject.NotSet):\n \"\"\"\n :calls: `PATCH /teams/:id <http://developer.github.com/v3/orgs/teams>`_\n :param name: string\n :param permission: string\n :param privacy: string\n :rtype: None\n \"\"\"\n assert isinstance(name, (str, unicode)), name\n assert permission is github.GithubObject.NotSet or isinstance(permission, (str, unicode)), permission\n assert privacy is github.GithubObject.NotSet or isinstance(privacy, (str, unicode)), privacy\n post_parameters = {\n \"name\": name,\n }\n if permission is not github.GithubObject.NotSet:\n post_parameters[\"permission\"] = permission\n if privacy is not github.GithubObject.NotSet:\n post_parameters[\"privacy\"] = privacy\n headers, data = self._requester.requestJsonAndCheck(\n \"PATCH\",\n self.url,\n input=post_parameters\n )\n self._useAttributes(data)\n\n def get_members(self, role=github.GithubObject.NotSet):\n \"\"\"\n :calls: `GET /teams/:id/members <https://developer.github.com/v3/teams/members/#list-team-members>`_\n :param role: string\n :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`\n \"\"\"\n assert role is github.GithubObject.NotSet or isinstance(role, (str, unicode)), role\n url_parameters = dict()\n if role is not github.GithubObject.NotSet:\n assert role in ['member', 'maintainer', 'all']\n url_parameters[\"role\"] = role\n return github.PaginatedList.PaginatedList(\n github.NamedUser.NamedUser,\n self._requester,\n self.url + \"/members\",\n url_parameters\n )\n\n def get_repos(self):\n \"\"\"\n :calls: `GET /teams/:id/repos <http://developer.github.com/v3/orgs/teams>`_\n :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`\n \"\"\"\n return github.PaginatedList.PaginatedList(\n github.Repository.Repository,\n self._requester,\n self.url + \"/repos\",\n None\n )\n\n def has_in_members(self, member):\n \"\"\"\n :calls: `GET /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_\n :param member: :class:`github.NamedUser.NamedUser`\n :rtype: bool\n \"\"\"\n assert isinstance(member, github.NamedUser.NamedUser), member\n status, headers, data = self._requester.requestJson(\n \"GET\",\n self.url + \"/members/\" + member._identity\n )\n return status == 204\n\n def has_in_repos(self, repo):\n \"\"\"\n :calls: `GET /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_\n :param repo: :class:`github.Repository.Repository`\n :rtype: bool\n \"\"\"\n assert isinstance(repo, github.Repository.Repository), repo\n status, headers, data = self._requester.requestJson(\n \"GET\",\n self.url + \"/repos/\" + repo._identity\n )\n return status == 204\n\n def remove_from_members(self, member):\n \"\"\"\n :calls: `DELETE /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_\n :param member: 
:class:`github.NamedUser.NamedUser`\n :rtype: None\n \"\"\"\n assert isinstance(member, github.NamedUser.NamedUser), member\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url + \"/members/\" + member._identity\n )\n\n def remove_from_repos(self, repo):\n \"\"\"\n :calls: `DELETE /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_\n :param repo: :class:`github.Repository.Repository`\n :rtype: None\n \"\"\"\n assert isinstance(repo, github.Repository.Repository), repo\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url + \"/repos/\" + repo._identity\n )\n\n @property\n def _identity(self):\n return self.id\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._members_count = github.GithubObject.NotSet\n self._members_url = github.GithubObject.NotSet\n self._name = github.GithubObject.NotSet\n self._description = github.GithubObject.NotSet\n self._permission = github.GithubObject.NotSet\n self._repos_count = github.GithubObject.NotSet\n self._repositories_url = github.GithubObject.NotSet\n self._slug = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._organization = github.GithubObject.NotSet\n self._privacy = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"members_count\" in attributes: # pragma no branch\n self._members_count = self._makeIntAttribute(attributes[\"members_count\"])\n if \"members_url\" in attributes: # pragma no branch\n self._members_url = self._makeStringAttribute(attributes[\"members_url\"])\n if \"name\" in attributes: # pragma no branch\n self._name = self._makeStringAttribute(attributes[\"name\"])\n if \"description\" in attributes: # pragma no branch\n self._description = self._makeStringAttribute(attributes[\"description\"])\n if \"permission\" in attributes: # pragma no branch\n self._permission = self._makeStringAttribute(attributes[\"permission\"])\n if \"repos_count\" in attributes: # pragma no branch\n self._repos_count = self._makeIntAttribute(attributes[\"repos_count\"])\n if \"repositories_url\" in attributes: # pragma no branch\n self._repositories_url = self._makeStringAttribute(attributes[\"repositories_url\"])\n if \"slug\" in attributes: # pragma no branch\n self._slug = self._makeStringAttribute(attributes[\"slug\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"organization\" in attributes: # pragma no branch\n self._organization = self._makeClassAttribute(github.Organization.Organization, attributes[\"organization\"])\n if \"privacy\" in attributes: # pragma no branch\n self._privacy = self._makeStringAttribute(attributes[\"privacy\"])\n", "path": "github/Team.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 martinqt <[email protected]> #\n# Copyright 2014 Jan Orel <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Aron Culotta <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2016 mattjmorrison <[email protected]> #\n# Copyright 2018 Isuru Fernando <[email protected]> #\n# Copyright 2018 James D'Amato <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport github.GithubObject\nimport github.PaginatedList\n\nimport github.Repository\nimport github.NamedUser\nimport github.Organization\n\n\nclass Team(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents Teams. 
The reference can be found here http://developer.github.com/v3/orgs/teams/\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"name\": self._name.value})\n\n @property\n def id(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def members_count(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._members_count)\n return self._members_count.value\n\n @property\n def members_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._members_url)\n return self._members_url.value\n\n @property\n def name(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._name)\n return self._name.value\n\n @property\n def description(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._description)\n return self._description.value\n\n @property\n def permission(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._permission)\n return self._permission.value\n\n @property\n def repos_count(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._repos_count)\n return self._repos_count.value\n\n @property\n def repositories_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._repositories_url)\n return self._repositories_url.value\n\n @property\n def slug(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._slug)\n return self._slug.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def organization(self):\n \"\"\"\n :type: :class:`github.Organization.Organization`\n \"\"\"\n self._completeIfNotSet(self._organization)\n return self._organization.value\n\n @property\n def privacy(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._privacy)\n return self._privacy.value\n\n def add_to_members(self, member):\n \"\"\"\n :calls: `PUT /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_\n :param member: :class:`github.NamedUser.NamedUser`\n :rtype: None\n \"\"\"\n assert isinstance(member, github.NamedUser.NamedUser), member\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/members/\" + member._identity\n )\n\n def add_membership(self, member, role=github.GithubObject.NotSet):\n \"\"\"\n :calls: `PUT /teams/:id/memberships/:user <http://developer.github.com/v3/orgs/teams>`_\n :param member: :class:`github.Nameduser.NamedUser`\n :param role: string\n :rtype: None\n \"\"\"\n assert isinstance(member, github.NamedUser.NamedUser), member\n assert role is github.GithubObject.NotSet or isinstance(\n role, (str, unicode)), role\n if role is not github.GithubObject.NotSet:\n assert role in ['member', 'maintainer']\n put_parameters = {\n \"role\": role,\n }\n else:\n put_parameters = {\n \"role\": \"member\",\n }\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/memberships/\" + member._identity,\n input=put_parameters\n )\n\n def add_to_repos(self, repo):\n \"\"\"\n :calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_\n :param repo: :class:`github.Repository.Repository`\n :rtype: None\n \"\"\"\n assert isinstance(repo, github.Repository.Repository), repo\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/repos/\" + repo._identity\n )\n\n def set_repo_permission(self, repo, permission):\n \"\"\"\n :calls: `PUT 
/teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_\n :param repo: :class:`github.Repository.Repository`\n :param permission: string\n :rtype: None\n \"\"\"\n assert isinstance(repo, github.Repository.Repository), repo\n put_parameters = {\n \"permission\": permission,\n }\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/repos/\" + repo._identity,\n input=put_parameters\n )\n\n def delete(self):\n \"\"\"\n :calls: `DELETE /teams/:id <http://developer.github.com/v3/orgs/teams>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url\n )\n\n def edit(self, name, permission=github.GithubObject.NotSet, privacy=github.GithubObject.NotSet):\n \"\"\"\n :calls: `PATCH /teams/:id <http://developer.github.com/v3/orgs/teams>`_\n :param name: string\n :param permission: string\n :param privacy: string\n :rtype: None\n \"\"\"\n assert isinstance(name, (str, unicode)), name\n assert permission is github.GithubObject.NotSet or isinstance(permission, (str, unicode)), permission\n assert privacy is github.GithubObject.NotSet or isinstance(privacy, (str, unicode)), privacy\n post_parameters = {\n \"name\": name,\n }\n if permission is not github.GithubObject.NotSet:\n post_parameters[\"permission\"] = permission\n if privacy is not github.GithubObject.NotSet:\n post_parameters[\"privacy\"] = privacy\n headers, data = self._requester.requestJsonAndCheck(\n \"PATCH\",\n self.url,\n input=post_parameters\n )\n self._useAttributes(data)\n\n def get_members(self, role=github.GithubObject.NotSet):\n \"\"\"\n :calls: `GET /teams/:id/members <https://developer.github.com/v3/teams/members/#list-team-members>`_\n :param role: string\n :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`\n \"\"\"\n assert role is github.GithubObject.NotSet or isinstance(role, (str, unicode)), role\n url_parameters = dict()\n if role is not github.GithubObject.NotSet:\n assert role in ['member', 'maintainer', 'all']\n url_parameters[\"role\"] = role\n return github.PaginatedList.PaginatedList(\n github.NamedUser.NamedUser,\n self._requester,\n self.url + \"/members\",\n url_parameters\n )\n\n def get_repos(self):\n \"\"\"\n :calls: `GET /teams/:id/repos <http://developer.github.com/v3/orgs/teams>`_\n :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`\n \"\"\"\n return github.PaginatedList.PaginatedList(\n github.Repository.Repository,\n self._requester,\n self.url + \"/repos\",\n None\n )\n\n def has_in_members(self, member):\n \"\"\"\n :calls: `GET /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_\n :param member: :class:`github.NamedUser.NamedUser`\n :rtype: bool\n \"\"\"\n assert isinstance(member, github.NamedUser.NamedUser), member\n status, headers, data = self._requester.requestJson(\n \"GET\",\n self.url + \"/members/\" + member._identity\n )\n return status == 204\n\n def has_in_repos(self, repo):\n \"\"\"\n :calls: `GET /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_\n :param repo: :class:`github.Repository.Repository`\n :rtype: bool\n \"\"\"\n assert isinstance(repo, github.Repository.Repository), repo\n status, headers, data = self._requester.requestJson(\n \"GET\",\n self.url + \"/repos/\" + repo._identity\n )\n return status == 204\n\n def remove_from_members(self, member):\n \"\"\"\n :calls: `DELETE /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_\n :param member: 
:class:`github.NamedUser.NamedUser`\n :rtype: None\n \"\"\"\n assert isinstance(member, github.NamedUser.NamedUser), member\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url + \"/members/\" + member._identity\n )\n\n def remove_from_repos(self, repo):\n \"\"\"\n :calls: `DELETE /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_\n :param repo: :class:`github.Repository.Repository`\n :rtype: None\n \"\"\"\n assert isinstance(repo, github.Repository.Repository), repo\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url + \"/repos/\" + repo._identity\n )\n\n @property\n def _identity(self):\n return self.id\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._members_count = github.GithubObject.NotSet\n self._members_url = github.GithubObject.NotSet\n self._name = github.GithubObject.NotSet\n self._description = github.GithubObject.NotSet\n self._permission = github.GithubObject.NotSet\n self._repos_count = github.GithubObject.NotSet\n self._repositories_url = github.GithubObject.NotSet\n self._slug = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._organization = github.GithubObject.NotSet\n self._privacy = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"members_count\" in attributes: # pragma no branch\n self._members_count = self._makeIntAttribute(attributes[\"members_count\"])\n if \"members_url\" in attributes: # pragma no branch\n self._members_url = self._makeStringAttribute(attributes[\"members_url\"])\n if \"name\" in attributes: # pragma no branch\n self._name = self._makeStringAttribute(attributes[\"name\"])\n if \"description\" in attributes: # pragma no branch\n self._description = self._makeStringAttribute(attributes[\"description\"])\n if \"permission\" in attributes: # pragma no branch\n self._permission = self._makeStringAttribute(attributes[\"permission\"])\n if \"repos_count\" in attributes: # pragma no branch\n self._repos_count = self._makeIntAttribute(attributes[\"repos_count\"])\n if \"repositories_url\" in attributes: # pragma no branch\n self._repositories_url = self._makeStringAttribute(attributes[\"repositories_url\"])\n if \"slug\" in attributes: # pragma no branch\n self._slug = self._makeStringAttribute(attributes[\"slug\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"organization\" in attributes: # pragma no branch\n self._organization = self._makeClassAttribute(github.Organization.Organization, attributes[\"organization\"])\n if \"privacy\" in attributes: # pragma no branch\n self._privacy = self._makeStringAttribute(attributes[\"privacy\"])\n", "path": "github/Team.py" } ]
diff --git a/github/Team.py b/github/Team.py index b3be04cc37..54ec0e938f 100644 --- a/github/Team.py +++ b/github/Team.py @@ -89,7 +89,7 @@ def description(self): :type: string """ self._completeIfNotSet(self._description) - return self.description.value + return self._description.value @property def permission(self):
PennyLaneAI__pennylane-2947
[BUG] `qml.equal` ignores in-place inversion

Currently, we have:

```
>>> qml.equal(qml.RX(1.0, wires=0), qml.RX(1.0, wires=0).inv())
True
```

If two operations are inverses of each other, they should not be equal.
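The patch below resolves this by comparing the operators' `inverse` flags as the final equality check. A minimal sketch of the intended behaviour, assuming the legacy in-place inversion API where `.inv()` sets an `inverse` attribute on the operation:

```python
import pennylane as qml

op = qml.RX(1.0, wires=0)
op_inv = qml.RX(1.0, wires=0).inv()

# Same class, parameters, and wires, but the inverse flags differ, so with
# the patch applied the two operations no longer compare equal.
assert getattr(op, "inverse", False) != getattr(op_inv, "inverse", False)
assert not qml.equal(op, op_inv)
```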
[ { "content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the qml.equal function.\n\"\"\"\n# pylint: disable=too-many-arguments,too-many-return-statements\nimport pennylane as qml\nfrom pennylane.operation import Operator\n\n\ndef equal(\n op1: Operator,\n op2: Operator,\n check_interface=True,\n check_trainability=True,\n rtol=1e-5,\n atol=1e-9,\n):\n r\"\"\"Function for determining operator equality.\n\n Args:\n op1 (.Operator): First operator to compare\n op2 (.Operator): Second operator to compare\n check_interface (bool, optional): Whether to compare interfaces. Default: `True`\n check_trainability (bool, optional): Whether to compare trainability status. Default: `True`\n rtol (float, optional): Relative tolerance for parameters\n atol (float, optional): Absolute tolerance for parameters\n\n Returns:\n bool: `True` if the operators are equal, else `False`\n\n **Example**\n\n Given two operators, ``qml.equal`` determines their equality:\n\n >>> op1 = qml.RX(np.array(.12), wires=0)\n >>> op2 = qml.RY(np.array(1.23), wires=0)\n >>> qml.equal(op1, op1), qml.equal(op1, op2)\n True False\n\n .. details::\n :title: Usage Details\n\n You can use the optional arguments to get more specific results.\n\n Consider the following comparisons:\n\n >>> op1 = qml.RX(torch.tensor(1.2), wires=0)\n >>> op2 = qml.RX(jax.numpy.array(1.2), wires=0)\n >>> qml.equal(op1, op2)\n False\n\n >>> qml.equal(op1, op2, check_interface=False, check_trainability=False)\n True\n\n >>> op3 = qml.RX(np.array(1.2, requires_grad=True), wires=0)\n >>> op4 = qml.RX(np.array(1.2, requires_grad=False), wires=0)\n >>> qml.equal(op3, op4)\n False\n\n >>> qml.equal(op3, op4, check_trainability=False)\n True\n \"\"\"\n if op1.__class__ is not op2.__class__ or op1.arithmetic_depth != op2.arithmetic_depth:\n return False\n if op1.arithmetic_depth > 0:\n raise NotImplementedError(\n \"Comparison of operators with an arithmetic depth larger than 0 is not yet implemented.\"\n )\n if not all(\n qml.math.allclose(d1, d2, rtol=rtol, atol=atol) for d1, d2 in zip(op1.data, op2.data)\n ):\n return False\n if op1.wires != op2.wires:\n return False\n for kwarg in op1.hyperparameters:\n if op1.hyperparameters[kwarg] != op2.hyperparameters[kwarg]:\n return False\n\n if check_trainability:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.requires_grad(params_1) != qml.math.requires_grad(params_2):\n return False\n\n if check_interface:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.get_interface(params_1) != qml.math.get_interface(params_2):\n return False\n\n return True\n", "path": "pennylane/ops/functions/equal.py" } ]
[ { "content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the qml.equal function.\n\"\"\"\n# pylint: disable=too-many-arguments,too-many-return-statements\nimport pennylane as qml\nfrom pennylane.operation import Operator\n\n\ndef equal(\n op1: Operator,\n op2: Operator,\n check_interface=True,\n check_trainability=True,\n rtol=1e-5,\n atol=1e-9,\n):\n r\"\"\"Function for determining operator equality.\n\n Args:\n op1 (.Operator): First operator to compare\n op2 (.Operator): Second operator to compare\n check_interface (bool, optional): Whether to compare interfaces. Default: `True`\n check_trainability (bool, optional): Whether to compare trainability status. Default: `True`\n rtol (float, optional): Relative tolerance for parameters\n atol (float, optional): Absolute tolerance for parameters\n\n Returns:\n bool: `True` if the operators are equal, else `False`\n\n **Example**\n\n Given two operators, ``qml.equal`` determines their equality:\n\n >>> op1 = qml.RX(np.array(.12), wires=0)\n >>> op2 = qml.RY(np.array(1.23), wires=0)\n >>> qml.equal(op1, op1), qml.equal(op1, op2)\n True False\n\n .. details::\n :title: Usage Details\n\n You can use the optional arguments to get more specific results.\n\n Consider the following comparisons:\n\n >>> op1 = qml.RX(torch.tensor(1.2), wires=0)\n >>> op2 = qml.RX(jax.numpy.array(1.2), wires=0)\n >>> qml.equal(op1, op2)\n False\n\n >>> qml.equal(op1, op2, check_interface=False, check_trainability=False)\n True\n\n >>> op3 = qml.RX(np.array(1.2, requires_grad=True), wires=0)\n >>> op4 = qml.RX(np.array(1.2, requires_grad=False), wires=0)\n >>> qml.equal(op3, op4)\n False\n\n >>> qml.equal(op3, op4, check_trainability=False)\n True\n \"\"\"\n if op1.__class__ is not op2.__class__ or op1.arithmetic_depth != op2.arithmetic_depth:\n return False\n if op1.arithmetic_depth > 0:\n raise NotImplementedError(\n \"Comparison of operators with an arithmetic depth larger than 0 is not yet implemented.\"\n )\n if not all(\n qml.math.allclose(d1, d2, rtol=rtol, atol=atol) for d1, d2 in zip(op1.data, op2.data)\n ):\n return False\n if op1.wires != op2.wires:\n return False\n for kwarg in op1.hyperparameters:\n if op1.hyperparameters[kwarg] != op2.hyperparameters[kwarg]:\n return False\n\n if check_trainability:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.requires_grad(params_1) != qml.math.requires_grad(params_2):\n return False\n\n if check_interface:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.get_interface(params_1) != qml.math.get_interface(params_2):\n return False\n\n return getattr(op1, \"inverse\", False) == getattr(op2, \"inverse\", False)\n", "path": "pennylane/ops/functions/equal.py" } ]
diff --git a/doc/releases/changelog-0.25.0.md b/doc/releases/changelog-0.25.0.md index 1696b9060f3..3a49b7bd45e 100644 --- a/doc/releases/changelog-0.25.0.md +++ b/doc/releases/changelog-0.25.0.md @@ -710,6 +710,9 @@ of operators. <h3>Bug fixes 🐞</h3> +* Fixes `qml.equal` so that operators with different inverse properties are not equal. + [(#2947)](https://github.com/PennyLaneAI/pennylane/pull/2947) + * Cleans up interactions between operator arithmetic and batching by testing supported cases and adding errors when batching is not supported. [(#2900)](https://github.com/PennyLaneAI/pennylane/pull/2900) diff --git a/pennylane/ops/functions/equal.py b/pennylane/ops/functions/equal.py index 531b9cebea6..a4cdbd09200 100644 --- a/pennylane/ops/functions/equal.py +++ b/pennylane/ops/functions/equal.py @@ -98,4 +98,4 @@ def equal( if qml.math.get_interface(params_1) != qml.math.get_interface(params_2): return False - return True + return getattr(op1, "inverse", False) == getattr(op2, "inverse", False) diff --git a/tests/ops/functions/test_equal.py b/tests/ops/functions/test_equal.py index 5d8b2bf075e..d72118c340f 100644 --- a/tests/ops/functions/test_equal.py +++ b/tests/ops/functions/test_equal.py @@ -847,3 +847,15 @@ def test_equal_with_nested_operators_raises_error(self): + " depth larger than 0 is not yet implemented.", ): qml.equal(qml.adjoint(qml.PauliX(0)), qml.adjoint(qml.PauliX(0))) + + def test_equal_same_inversion(self): + """Test operations are equal if they are both inverted.""" + op1 = qml.RX(1.2, wires=0).inv() + op2 = qml.RX(1.2, wires=0).inv() + assert qml.equal(op1, op2) + + def test_not_equal_different_inversion(self): + """Test operations are not equal if one is inverted and the other is not.""" + op1 = qml.PauliX(0) + op2 = qml.PauliX(0).inv() + assert not qml.equal(op1, op2)
numpy__numpy-13571
DOC: expand_dims returns a view

Hi, if we apply expand_dims to a numpy array 'a':

    b = numpy.expand_dims(a, axis=0)

then 'b' is not a new array; it is a view that still refers to the data of 'a'. Is this the correct behavior? If so, it should be noted in the manual.

Thanks!
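The behaviour the reporter describes is easy to demonstrate: `expand_dims` is implemented as a `reshape` of the input (as the source below shows), so the result shares memory with the original array and writes through one are visible in the other. A minimal illustration:

```python
import numpy as np

a = np.array([1, 2, 3])
b = np.expand_dims(a, axis=0)

print(np.shares_memory(a, b))  # True: b is a view, not a copy

b[0, 0] = 99                   # writing through the view...
print(a)                       # [99  2  3]  ...changes the original array
```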
[ { "content": "from __future__ import division, absolute_import, print_function\n\nimport functools\nimport warnings\n\nimport numpy.core.numeric as _nx\nfrom numpy.core.numeric import (\n asarray, zeros, outer, concatenate, array, asanyarray\n )\nfrom numpy.core.fromnumeric import product, reshape, transpose\nfrom numpy.core.multiarray import normalize_axis_index\nfrom numpy.core import overrides\nfrom numpy.core import vstack, atleast_3d\nfrom numpy.core.shape_base import _arrays_for_stack_dispatcher\nfrom numpy.lib.index_tricks import ndindex\nfrom numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells\n\n\n__all__ = [\n 'column_stack', 'row_stack', 'dstack', 'array_split', 'split',\n 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',\n 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',\n 'put_along_axis'\n ]\n\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\ndef _make_along_axis_idx(arr_shape, indices, axis):\n\t# compute dimensions to iterate over\n if not _nx.issubdtype(indices.dtype, _nx.integer):\n raise IndexError('`indices` must be an integer array')\n if len(arr_shape) != indices.ndim:\n raise ValueError(\n \"`indices` and `arr` must have the same number of dimensions\")\n shape_ones = (1,) * indices.ndim\n dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))\n\n # build a fancy index, consisting of orthogonal aranges, with the\n # requested index inserted at the right location\n fancy_index = []\n for dim, n in zip(dest_dims, arr_shape):\n if dim is None:\n fancy_index.append(indices)\n else:\n ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]\n fancy_index.append(_nx.arange(n).reshape(ind_shape))\n\n return tuple(fancy_index)\n\n\ndef _take_along_axis_dispatcher(arr, indices, axis):\n return (arr, indices)\n\n\n@array_function_dispatch(_take_along_axis_dispatcher)\ndef take_along_axis(arr, indices, axis):\n \"\"\"\n Take values from the input array by matching 1d index and data slices.\n\n This iterates over matching 1d slices oriented along the specified axis in\n the index and data arrays, and uses the former to look up values in the\n latter. These slices can be different lengths.\n\n Functions returning an index along an axis, like `argsort` and\n `argpartition`, produce suitable indices for this function.\n\n .. versionadded:: 1.15.0\n\n Parameters\n ----------\n arr: ndarray (Ni..., M, Nk...)\n Source array\n indices: ndarray (Ni..., J, Nk...)\n Indices to take along each 1d slice of `arr`. This must match the\n dimension of arr, but dimensions Ni and Nj only need to broadcast\n against `arr`.\n axis: int\n The axis to take 1d slices along. 
If axis is None, the input array is\n treated as if it had first been flattened to 1d, for consistency with\n `sort` and `argsort`.\n\n Returns\n -------\n out: ndarray (Ni..., J, Nk...)\n The indexed result.\n\n Notes\n -----\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::\n\n Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]\n J = indices.shape[axis] # Need not equal M\n out = np.empty(Nk + (J,) + Nk)\n\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n a_1d = a [ii + s_[:,] + kk]\n indices_1d = indices[ii + s_[:,] + kk]\n out_1d = out [ii + s_[:,] + kk]\n for j in range(J):\n out_1d[j] = a_1d[indices_1d[j]]\n\n Equivalently, eliminating the inner loop, the last two lines would be::\n\n out_1d[:] = a_1d[indices_1d]\n\n See Also\n --------\n take : Take along an axis, using the same indices for every 1d slice\n put_along_axis :\n Put values into the destination array by matching 1d index and data slices\n\n Examples\n --------\n\n For this sample array\n\n >>> a = np.array([[10, 30, 20], [60, 40, 50]])\n\n We can sort either by using sort directly, or argsort and this function\n\n >>> np.sort(a, axis=1)\n array([[10, 20, 30],\n [40, 50, 60]])\n >>> ai = np.argsort(a, axis=1); ai\n array([[0, 2, 1],\n [1, 2, 0]])\n >>> np.take_along_axis(a, ai, axis=1)\n array([[10, 20, 30],\n [40, 50, 60]])\n\n The same works for max and min, if you expand the dimensions:\n\n >>> np.expand_dims(np.max(a, axis=1), axis=1)\n array([[30],\n [60]])\n >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)\n >>> ai\n array([[1],\n [0]])\n >>> np.take_along_axis(a, ai, axis=1)\n array([[30],\n [60]])\n\n If we want to get the max and min at the same time, we can stack the\n indices first\n\n >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)\n >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)\n >>> ai = np.concatenate([ai_min, ai_max], axis=1)\n >>> ai\n array([[0, 1],\n [1, 0]])\n >>> np.take_along_axis(a, ai, axis=1)\n array([[10, 30],\n [40, 60]])\n \"\"\"\n # normalize inputs\n if axis is None:\n arr = arr.flat\n arr_shape = (len(arr),) # flatiter has no .shape\n axis = 0\n else:\n axis = normalize_axis_index(axis, arr.ndim)\n arr_shape = arr.shape\n\n # use the fancy index\n return arr[_make_along_axis_idx(arr_shape, indices, axis)]\n\n\ndef _put_along_axis_dispatcher(arr, indices, values, axis):\n return (arr, indices, values)\n\n\n@array_function_dispatch(_put_along_axis_dispatcher)\ndef put_along_axis(arr, indices, values, axis):\n \"\"\"\n Put values into the destination array by matching 1d index and data slices.\n\n This iterates over matching 1d slices oriented along the specified axis in\n the index and data arrays, and uses the former to place values into the\n latter. These slices can be different lengths.\n\n Functions returning an index along an axis, like `argsort` and\n `argpartition`, produce suitable indices for this function.\n\n .. versionadded:: 1.15.0\n\n Parameters\n ----------\n arr: ndarray (Ni..., M, Nk...)\n Destination array.\n indices: ndarray (Ni..., J, Nk...)\n Indices to change along each 1d slice of `arr`. This must match the\n dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast\n against `arr`.\n values: array_like (Ni..., J, Nk...)\n values to insert at those indices. Its shape and dimension are\n broadcast to match that of `indices`.\n axis: int\n The axis to take 1d slices along. 
If axis is None, the destination\n array is treated as if a flattened 1d view had been created of it.\n\n Notes\n -----\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::\n\n Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]\n J = indices.shape[axis] # Need not equal M\n\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n a_1d = a [ii + s_[:,] + kk]\n indices_1d = indices[ii + s_[:,] + kk]\n values_1d = values [ii + s_[:,] + kk]\n for j in range(J):\n a_1d[indices_1d[j]] = values_1d[j]\n\n Equivalently, eliminating the inner loop, the last two lines would be::\n\n a_1d[indices_1d] = values_1d\n\n See Also\n --------\n take_along_axis :\n Take values from the input array by matching 1d index and data slices\n\n Examples\n --------\n\n For this sample array\n\n >>> a = np.array([[10, 30, 20], [60, 40, 50]])\n\n We can replace the maximum values with:\n\n >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)\n >>> ai\n array([[1],\n [0]])\n >>> np.put_along_axis(a, ai, 99, axis=1)\n >>> a\n array([[10, 99, 20],\n [99, 40, 50]])\n\n \"\"\"\n # normalize inputs\n if axis is None:\n arr = arr.flat\n axis = 0\n arr_shape = (len(arr),) # flatiter has no .shape\n else:\n axis = normalize_axis_index(axis, arr.ndim)\n arr_shape = arr.shape\n\n # use the fancy index\n arr[_make_along_axis_idx(arr_shape, indices, axis)] = values\n\n\ndef _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):\n return (arr,)\n\n\n@array_function_dispatch(_apply_along_axis_dispatcher)\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n \"\"\"\n Apply a function to 1-D slices along the given axis.\n\n Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`\n is a 1-D slice of `arr` along `axis`.\n\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n f = func1d(arr[ii + s_[:,] + kk])\n Nj = f.shape\n for jj in ndindex(Nj):\n out[ii + jj + kk] = f[jj]\n\n Equivalently, eliminating the inner loop, this can be expressed as::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])\n\n Parameters\n ----------\n func1d : function (M,) -> (Nj...)\n This function should accept 1-D arrays. It is applied to 1-D\n slices of `arr` along the specified axis.\n axis : integer\n Axis along which `arr` is sliced.\n arr : ndarray (Ni..., M, Nk...)\n Input array.\n args : any\n Additional arguments to `func1d`.\n kwargs : any\n Additional named arguments to `func1d`.\n\n .. versionadded:: 1.9.0\n\n\n Returns\n -------\n out : ndarray (Ni..., Nj..., Nk...)\n The output array. The shape of `out` is identical to the shape of\n `arr`, except along the `axis` dimension. This axis is removed, and\n replaced with new dimensions equal to the shape of the return value\n of `func1d`. So if `func1d` returns a scalar `out` will have one\n fewer dimensions than `arr`.\n\n See Also\n --------\n apply_over_axes : Apply a function repeatedly over multiple axes.\n\n Examples\n --------\n >>> def my_func(a):\n ... \\\"\\\"\\\"Average first and last element of a 1-D array\\\"\\\"\\\"\n ... 
return (a[0] + a[-1]) * 0.5\n >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])\n >>> np.apply_along_axis(my_func, 0, b)\n array([4., 5., 6.])\n >>> np.apply_along_axis(my_func, 1, b)\n array([2., 5., 8.])\n\n For a function that returns a 1D array, the number of dimensions in\n `outarr` is the same as `arr`.\n\n >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])\n >>> np.apply_along_axis(sorted, 1, b)\n array([[1, 7, 8],\n [3, 4, 9],\n [2, 5, 6]])\n\n For a function that returns a higher dimensional array, those dimensions\n are inserted in place of the `axis` dimension.\n\n >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])\n >>> np.apply_along_axis(np.diag, -1, b)\n array([[[1, 0, 0],\n [0, 2, 0],\n [0, 0, 3]],\n [[4, 0, 0],\n [0, 5, 0],\n [0, 0, 6]],\n [[7, 0, 0],\n [0, 8, 0],\n [0, 0, 9]]])\n \"\"\"\n # handle negative axes\n arr = asanyarray(arr)\n nd = arr.ndim\n axis = normalize_axis_index(axis, nd)\n\n # arr, with the iteration axis at the end\n in_dims = list(range(nd))\n inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])\n\n # compute indices for the iteration axes, and append a trailing ellipsis to\n # prevent 0d arrays decaying to scalars, which fixes gh-8642\n inds = ndindex(inarr_view.shape[:-1])\n inds = (ind + (Ellipsis,) for ind in inds)\n\n # invoke the function on the first item\n try:\n ind0 = next(inds)\n except StopIteration:\n raise ValueError('Cannot apply_along_axis when any iteration dimensions are 0')\n res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))\n\n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n # laid out so that each write is contiguous.\n # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])\n buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)\n\n # permutation of axes such that out = buff.transpose(buff_permute)\n buff_dims = list(range(buff.ndim))\n buff_permute = (\n buff_dims[0 : axis] +\n buff_dims[buff.ndim-res.ndim : buff.ndim] +\n buff_dims[axis : buff.ndim-res.ndim]\n )\n\n # matrices have a nasty __array_prepare__ and __array_wrap__\n if not isinstance(res, matrix):\n buff = res.__array_prepare__(buff)\n\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))\n\n if not isinstance(res, matrix):\n # wrap the array, to preserve subclasses\n buff = res.__array_wrap__(buff)\n\n # finally, rotate the inserted axes back to where they belong\n return transpose(buff, buff_permute)\n\n else:\n # matrices have to be transposed first, because they collapse dimensions!\n out_arr = transpose(buff, buff_permute)\n return res.__array_wrap__(out_arr)\n\n\ndef _apply_over_axes_dispatcher(func, a, axes):\n return (a,)\n\n\n@array_function_dispatch(_apply_over_axes_dispatcher)\ndef apply_over_axes(func, a, axes):\n \"\"\"\n Apply a function repeatedly over multiple axes.\n\n `func` is called as `res = func(a, axis)`, where `axis` is the first\n element of `axes`. The result `res` of the function call must have\n either the same dimensions as `a` or one less dimension. If `res`\n has one less dimension than `a`, a dimension is inserted before\n `axis`. 
The call to `func` is then repeated for each axis in `axes`,\n with `res` as the first argument.\n\n Parameters\n ----------\n func : function\n This function must take two arguments, `func(a, axis)`.\n a : array_like\n Input array.\n axes : array_like\n Axes over which `func` is applied; the elements must be integers.\n\n Returns\n -------\n apply_over_axis : ndarray\n The output array. The number of dimensions is the same as `a`,\n but the shape can be different. This depends on whether `func`\n changes the shape of its output with respect to its input.\n\n See Also\n --------\n apply_along_axis :\n Apply a function to 1-D slices of an array along the given axis.\n\n Notes\n ------\n This function is equivalent to tuple axis arguments to reorderable ufuncs\n with keepdims=True. Tuple axis arguments to ufuncs have been available since\n version 1.7.0.\n\n Examples\n --------\n >>> a = np.arange(24).reshape(2,3,4)\n >>> a\n array([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]])\n\n Sum over axes 0 and 2. The result has same number of dimensions\n as the original array:\n\n >>> np.apply_over_axes(np.sum, a, [0,2])\n array([[[ 60],\n [ 92],\n [124]]])\n\n Tuple axis arguments to ufuncs are equivalent:\n\n >>> np.sum(a, axis=(0,2), keepdims=True)\n array([[[ 60],\n [ 92],\n [124]]])\n\n \"\"\"\n val = asarray(a)\n N = a.ndim\n if array(axes).ndim == 0:\n axes = (axes,)\n for axis in axes:\n if axis < 0:\n axis = N + axis\n args = (val, axis)\n res = func(*args)\n if res.ndim == val.ndim:\n val = res\n else:\n res = expand_dims(res, axis)\n if res.ndim == val.ndim:\n val = res\n else:\n raise ValueError(\"function is not returning \"\n \"an array of the correct shape\")\n return val\n\n\ndef _expand_dims_dispatcher(a, axis):\n return (a,)\n\n\n@array_function_dispatch(_expand_dims_dispatcher)\ndef expand_dims(a, axis):\n \"\"\"\n Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded\n array shape.\n\n .. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor\n ``axis > a.ndim`` raised errors or put the new axis where documented.\n Those axis values are now deprecated and will raise an AxisError in the\n future.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int\n Position in the expanded axes where the new axis is placed.\n\n Returns\n -------\n res : ndarray\n Output array. The number of dimensions is one greater than that of\n the input array.\n\n See Also\n --------\n squeeze : The inverse operation, removing singleton dimensions\n reshape : Insert, remove, and combine dimensions, and resize existing ones\n doc.indexing, atleast_1d, atleast_2d, atleast_3d\n\n Examples\n --------\n >>> x = np.array([1,2])\n >>> x.shape\n (2,)\n\n The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:\n\n >>> y = np.expand_dims(x, axis=0)\n >>> y\n array([[1, 2]])\n >>> y.shape\n (1, 2)\n\n >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]\n >>> y\n array([[1],\n [2]])\n >>> y.shape\n (2, 1)\n\n Note that some examples may use ``None`` instead of ``np.newaxis``. 
These\n are the same objects:\n\n >>> np.newaxis is None\n True\n\n \"\"\"\n if isinstance(a, matrix):\n a = asarray(a)\n else:\n a = asanyarray(a)\n\n shape = a.shape\n if axis > a.ndim or axis < -a.ndim - 1:\n # 2017-05-17, 1.13.0\n warnings.warn(\"Both axis > a.ndim and axis < -a.ndim - 1 are \"\n \"deprecated and will raise an AxisError in the future.\",\n DeprecationWarning, stacklevel=2)\n # When the deprecation period expires, delete this if block,\n if axis < 0:\n axis = axis + a.ndim + 1\n # and uncomment the following line.\n # axis = normalize_axis_index(axis, a.ndim + 1)\n return a.reshape(shape[:axis] + (1,) + shape[axis:])\n\n\nrow_stack = vstack\n\n\ndef _column_stack_dispatcher(tup):\n return _arrays_for_stack_dispatcher(tup)\n\n\n@array_function_dispatch(_column_stack_dispatcher)\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Parameters\n ----------\n tup : sequence of 1-D or 2-D arrays.\n Arrays to stack. All of them must have the same first dimension.\n\n Returns\n -------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n stack, hstack, vstack, concatenate\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1, 2],\n [2, 3],\n [3, 4]])\n\n \"\"\"\n arrays = []\n for v in tup:\n arr = array(v, copy=False, subok=True)\n if arr.ndim < 2:\n arr = array(arr, copy=False, subok=True, ndmin=2).T\n arrays.append(arr)\n return _nx.concatenate(arrays, 1)\n\n\ndef _dstack_dispatcher(tup):\n return _arrays_for_stack_dispatcher(tup)\n\n\n@array_function_dispatch(_dstack_dispatcher)\ndef dstack(tup):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n\n This is equivalent to concatenation along the third axis after 2-D arrays\n of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape\n `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by\n `dsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of arrays\n The arrays must have the same shape along all but the third axis.\n 1-D or 2-D arrays must have the same shape.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 3-D.\n\n See Also\n --------\n stack : Join a sequence of arrays along a new axis.\n vstack : Stack along first axis.\n hstack : Stack along second axis.\n concatenate : Join a sequence of arrays along an existing axis.\n dsplit : Split array along third axis.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n\n \"\"\"\n return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)\n\n\ndef _replace_zero_by_x_arrays(sub_arys):\n for i in range(len(sub_arys)):\n if _nx.ndim(sub_arys[i]) == 0:\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n return sub_arys\n\n\ndef _array_split_dispatcher(ary, indices_or_sections, axis=None):\n return (ary, indices_or_sections)\n\n\n@array_function_dispatch(_array_split_dispatcher)\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Please refer to the ``split`` documentation. The only difference\n between these functions is that ``array_split`` allows\n `indices_or_sections` to be an integer that does *not* equally\n divide the axis. For an array of length l that should be split\n into n sections, it returns l % n sub-arrays of size l//n + 1\n and the rest of size l//n.\n\n See Also\n --------\n split : Split array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(8.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]\n\n >>> x = np.arange(7.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]\n\n \"\"\"\n try:\n Ntotal = ary.shape[axis]\n except AttributeError:\n Ntotal = len(ary)\n try:\n # handle array case.\n Nsections = len(indices_or_sections) + 1\n div_points = [0] + list(indices_or_sections) + [Ntotal]\n except TypeError:\n # indices_or_sections is a scalar, not an array.\n Nsections = int(indices_or_sections)\n if Nsections <= 0:\n raise ValueError('number sections must be larger than 0.')\n Neach_section, extras = divmod(Ntotal, Nsections)\n section_sizes = ([0] +\n extras * [Neach_section+1] +\n (Nsections-extras) * [Neach_section])\n div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()\n\n sub_arys = []\n sary = _nx.swapaxes(ary, axis, 0)\n for i in range(Nsections):\n st = div_points[i]\n end = div_points[i + 1]\n sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))\n\n return sub_arys\n\n\ndef _split_dispatcher(ary, indices_or_sections, axis=None):\n return (ary, indices_or_sections)\n\n\n@array_function_dispatch(_split_dispatcher)\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D array\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along 
`axis`. If such a split is not possible,\n an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n\n See Also\n --------\n array_split : Split an array into multiple sub-arrays of equal or\n near-equal size. Does not raise an exception if\n an equal division cannot be made.\n hsplit : Split array into multiple sub-arrays horizontally (column-wise).\n vsplit : Split array into multiple sub-arrays vertically (row wise).\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n\n Examples\n --------\n >>> x = np.arange(9.0)\n >>> np.split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n\n >>> x = np.arange(8.0)\n >>> np.split(x, [3, 5, 6, 10])\n [array([0., 1., 2.]),\n array([3., 4.]),\n array([5.]),\n array([6., 7.]),\n array([], dtype=float64)]\n\n \"\"\"\n try:\n len(indices_or_sections)\n except TypeError:\n sections = indices_or_sections\n N = ary.shape[axis]\n if N % sections:\n raise ValueError(\n 'array split does not result in an equal division')\n res = array_split(ary, indices_or_sections, axis)\n return res\n\n\ndef _hvdsplit_dispatcher(ary, indices_or_sections):\n return (ary, indices_or_sections)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)\ndef hsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the `split` documentation. 
`hsplit` is equivalent\n to `split` with ``axis=1``, the array is always split along the second\n axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float64)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[0., 1.]],\n [[4., 5.]]]),\n array([[[2., 3.]],\n [[6., 7.]]])]\n\n \"\"\"\n if _nx.ndim(ary) == 0:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n if ary.ndim > 1:\n return split(ary, indices_or_sections, 1)\n else:\n return split(ary, indices_or_sections, 0)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)\ndef vsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays vertically (row-wise).\n\n Please refer to the ``split`` documentation. ``vsplit`` is equivalent\n to ``split`` with `axis=0` (default), the array is always split along the\n first axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])]\n >>> np.vsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)]\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[0., 1.],\n [2., 3.]]]), array([[[4., 5.],\n [6., 7.]]])]\n\n \"\"\"\n if _nx.ndim(ary) < 2:\n raise ValueError('vsplit only works on arrays of 2 or more dimensions')\n return split(ary, indices_or_sections, 0)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)\ndef dsplit(ary, indices_or_sections):\n \"\"\"\n Split array into multiple sub-arrays along the 3rd axis (depth).\n\n Please refer to the `split` documentation. 
`dsplit` is equivalent\n to `split` with ``axis=2``, the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [12., 13.]]]), array([[[ 2., 3.],\n [ 6., 7.]],\n [[10., 11.],\n [14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[11.],\n [15.]]]),\n array([], shape=(2, 2, 0), dtype=float64)]\n \"\"\"\n if _nx.ndim(ary) < 3:\n raise ValueError('dsplit only works on arrays of 3 or more dimensions')\n return split(ary, indices_or_sections, 2)\n\ndef get_array_prepare(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_prepare__) for i, x in enumerate(args)\n if hasattr(x, '__array_prepare__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\ndef get_array_wrap(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_wrap__) for i, x in enumerate(args)\n if hasattr(x, '__array_wrap__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\n\ndef _kron_dispatcher(a, b):\n return (a, b)\n\n\n@array_function_dispatch(_kron_dispatcher)\ndef kron(a, b):\n \"\"\"\n Kronecker product of two arrays.\n\n Computes the Kronecker product, a composite array made of blocks of the\n second array scaled by the first.\n\n Parameters\n ----------\n a, b : array_like\n\n Returns\n -------\n out : ndarray\n\n See Also\n --------\n outer : The outer product\n\n Notes\n -----\n The function assumes that the number of dimensions of `a` and `b`\n are the same, if necessary prepending the smallest with ones.\n If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,\n the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.\n The elements are products of elements from `a` and `b`, organized\n explicitly by::\n\n kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]\n\n where::\n\n kt = it * st + jt, t = 0,...,N\n\n In the common 2-D case (N=1), the block structure can be visualized::\n\n [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],\n [ ... ... ],\n [ a[-1,0]*b, a[-1,1]*b, ... 
, a[-1,-1]*b ]]\n\n\n Examples\n --------\n >>> np.kron([1,10,100], [5,6,7])\n array([ 5, 6, 7, ..., 500, 600, 700])\n >>> np.kron([5,6,7], [1,10,100])\n array([ 5, 50, 500, ..., 7, 70, 700])\n\n >>> np.kron(np.eye(2), np.ones((2,2)))\n array([[1., 1., 0., 0.],\n [1., 1., 0., 0.],\n [0., 0., 1., 1.],\n [0., 0., 1., 1.]])\n\n >>> a = np.arange(100).reshape((2,5,2,5))\n >>> b = np.arange(24).reshape((2,3,4))\n >>> c = np.kron(a,b)\n >>> c.shape\n (2, 10, 6, 20)\n >>> I = (1,3,0,2)\n >>> J = (0,2,1)\n >>> J1 = (0,) + J # extend to ndim=4\n >>> S1 = (1,) + b.shape\n >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))\n >>> c[K] == a[I]*b[J]\n True\n\n \"\"\"\n b = asanyarray(b)\n a = array(a, copy=False, subok=True, ndmin=b.ndim)\n ndb, nda = b.ndim, a.ndim\n if (nda == 0 or ndb == 0):\n return _nx.multiply(a, b)\n as_ = a.shape\n bs = b.shape\n if not a.flags.contiguous:\n a = reshape(a, as_)\n if not b.flags.contiguous:\n b = reshape(b, bs)\n nd = ndb\n if (ndb != nda):\n if (ndb > nda):\n as_ = (1,)*(ndb-nda) + as_\n else:\n bs = (1,)*(nda-ndb) + bs\n nd = nda\n result = outer(a, b).reshape(as_+bs)\n axis = nd-1\n for _ in range(nd):\n result = concatenate(result, axis=axis)\n wrapper = get_array_prepare(a, b)\n if wrapper is not None:\n result = wrapper(result)\n wrapper = get_array_wrap(a, b)\n if wrapper is not None:\n result = wrapper(result)\n return result\n\n\ndef _tile_dispatcher(A, reps):\n return (A, reps)\n\n\n@array_function_dispatch(_tile_dispatcher)\ndef tile(A, reps):\n \"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. 
If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Note : Although tile may be used for broadcasting, it is strongly\n recommended to use numpy's broadcasting operations and functions.\n\n Parameters\n ----------\n A : array_like\n The input array.\n reps : array_like\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n See Also\n --------\n repeat : Repeat elements of an array.\n broadcast_to : Broadcast an array to a new shape\n\n Examples\n --------\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0, 1, 2, 0, 1, 2])\n >>> np.tile(a, (2, 2))\n array([[0, 1, 2, 0, 1, 2],\n [0, 1, 2, 0, 1, 2]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0, 1, 2, 0, 1, 2]],\n [[0, 1, 2, 0, 1, 2]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n >>> np.tile(b, (2, 1))\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> c = np.array([1,2,3,4])\n >>> np.tile(c,(4,1))\n array([[1, 2, 3, 4],\n [1, 2, 3, 4],\n [1, 2, 3, 4],\n [1, 2, 3, 4]])\n \"\"\"\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n d = len(tup)\n if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):\n # Fixes the problem that the function does not make a copy if A is a\n # numpy array and the repetitions are 1 in all dimensions\n return _nx.array(A, copy=True, subok=True, ndmin=d)\n else:\n # Note that no copy of zero-sized arrays is made. However since they\n # have no data there is no risk of an inadvertent overwrite.\n c = _nx.array(A, copy=False, subok=True, ndmin=d)\n if (d < c.ndim):\n tup = (1,)*(c.ndim-d) + tup\n shape_out = tuple(s*t for s, t in zip(c.shape, tup))\n n = c.size\n if n > 0:\n for dim_in, nrep in zip(c.shape, tup):\n if nrep != 1:\n c = c.reshape(-1, n).repeat(nrep, 0)\n n //= dim_in\n return c.reshape(shape_out)\n", "path": "numpy/lib/shape_base.py" } ]
[ { "content": "from __future__ import division, absolute_import, print_function\n\nimport functools\nimport warnings\n\nimport numpy.core.numeric as _nx\nfrom numpy.core.numeric import (\n asarray, zeros, outer, concatenate, array, asanyarray\n )\nfrom numpy.core.fromnumeric import product, reshape, transpose\nfrom numpy.core.multiarray import normalize_axis_index\nfrom numpy.core import overrides\nfrom numpy.core import vstack, atleast_3d\nfrom numpy.core.shape_base import _arrays_for_stack_dispatcher\nfrom numpy.lib.index_tricks import ndindex\nfrom numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells\n\n\n__all__ = [\n 'column_stack', 'row_stack', 'dstack', 'array_split', 'split',\n 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',\n 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',\n 'put_along_axis'\n ]\n\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\ndef _make_along_axis_idx(arr_shape, indices, axis):\n\t# compute dimensions to iterate over\n if not _nx.issubdtype(indices.dtype, _nx.integer):\n raise IndexError('`indices` must be an integer array')\n if len(arr_shape) != indices.ndim:\n raise ValueError(\n \"`indices` and `arr` must have the same number of dimensions\")\n shape_ones = (1,) * indices.ndim\n dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))\n\n # build a fancy index, consisting of orthogonal aranges, with the\n # requested index inserted at the right location\n fancy_index = []\n for dim, n in zip(dest_dims, arr_shape):\n if dim is None:\n fancy_index.append(indices)\n else:\n ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]\n fancy_index.append(_nx.arange(n).reshape(ind_shape))\n\n return tuple(fancy_index)\n\n\ndef _take_along_axis_dispatcher(arr, indices, axis):\n return (arr, indices)\n\n\n@array_function_dispatch(_take_along_axis_dispatcher)\ndef take_along_axis(arr, indices, axis):\n \"\"\"\n Take values from the input array by matching 1d index and data slices.\n\n This iterates over matching 1d slices oriented along the specified axis in\n the index and data arrays, and uses the former to look up values in the\n latter. These slices can be different lengths.\n\n Functions returning an index along an axis, like `argsort` and\n `argpartition`, produce suitable indices for this function.\n\n .. versionadded:: 1.15.0\n\n Parameters\n ----------\n arr: ndarray (Ni..., M, Nk...)\n Source array\n indices: ndarray (Ni..., J, Nk...)\n Indices to take along each 1d slice of `arr`. This must match the\n dimension of arr, but dimensions Ni and Nj only need to broadcast\n against `arr`.\n axis: int\n The axis to take 1d slices along. 
If axis is None, the input array is\n treated as if it had first been flattened to 1d, for consistency with\n `sort` and `argsort`.\n\n Returns\n -------\n out: ndarray (Ni..., J, Nk...)\n The indexed result.\n\n Notes\n -----\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::\n\n Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]\n J = indices.shape[axis] # Need not equal M\n out = np.empty(Nk + (J,) + Nk)\n\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n a_1d = a [ii + s_[:,] + kk]\n indices_1d = indices[ii + s_[:,] + kk]\n out_1d = out [ii + s_[:,] + kk]\n for j in range(J):\n out_1d[j] = a_1d[indices_1d[j]]\n\n Equivalently, eliminating the inner loop, the last two lines would be::\n\n out_1d[:] = a_1d[indices_1d]\n\n See Also\n --------\n take : Take along an axis, using the same indices for every 1d slice\n put_along_axis :\n Put values into the destination array by matching 1d index and data slices\n\n Examples\n --------\n\n For this sample array\n\n >>> a = np.array([[10, 30, 20], [60, 40, 50]])\n\n We can sort either by using sort directly, or argsort and this function\n\n >>> np.sort(a, axis=1)\n array([[10, 20, 30],\n [40, 50, 60]])\n >>> ai = np.argsort(a, axis=1); ai\n array([[0, 2, 1],\n [1, 2, 0]])\n >>> np.take_along_axis(a, ai, axis=1)\n array([[10, 20, 30],\n [40, 50, 60]])\n\n The same works for max and min, if you expand the dimensions:\n\n >>> np.expand_dims(np.max(a, axis=1), axis=1)\n array([[30],\n [60]])\n >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)\n >>> ai\n array([[1],\n [0]])\n >>> np.take_along_axis(a, ai, axis=1)\n array([[30],\n [60]])\n\n If we want to get the max and min at the same time, we can stack the\n indices first\n\n >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)\n >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)\n >>> ai = np.concatenate([ai_min, ai_max], axis=1)\n >>> ai\n array([[0, 1],\n [1, 0]])\n >>> np.take_along_axis(a, ai, axis=1)\n array([[10, 30],\n [40, 60]])\n \"\"\"\n # normalize inputs\n if axis is None:\n arr = arr.flat\n arr_shape = (len(arr),) # flatiter has no .shape\n axis = 0\n else:\n axis = normalize_axis_index(axis, arr.ndim)\n arr_shape = arr.shape\n\n # use the fancy index\n return arr[_make_along_axis_idx(arr_shape, indices, axis)]\n\n\ndef _put_along_axis_dispatcher(arr, indices, values, axis):\n return (arr, indices, values)\n\n\n@array_function_dispatch(_put_along_axis_dispatcher)\ndef put_along_axis(arr, indices, values, axis):\n \"\"\"\n Put values into the destination array by matching 1d index and data slices.\n\n This iterates over matching 1d slices oriented along the specified axis in\n the index and data arrays, and uses the former to place values into the\n latter. These slices can be different lengths.\n\n Functions returning an index along an axis, like `argsort` and\n `argpartition`, produce suitable indices for this function.\n\n .. versionadded:: 1.15.0\n\n Parameters\n ----------\n arr: ndarray (Ni..., M, Nk...)\n Destination array.\n indices: ndarray (Ni..., J, Nk...)\n Indices to change along each 1d slice of `arr`. This must match the\n dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast\n against `arr`.\n values: array_like (Ni..., J, Nk...)\n values to insert at those indices. Its shape and dimension are\n broadcast to match that of `indices`.\n axis: int\n The axis to take 1d slices along. 
If axis is None, the destination\n array is treated as if a flattened 1d view had been created of it.\n\n Notes\n -----\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::\n\n Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]\n J = indices.shape[axis] # Need not equal M\n\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n a_1d = a [ii + s_[:,] + kk]\n indices_1d = indices[ii + s_[:,] + kk]\n values_1d = values [ii + s_[:,] + kk]\n for j in range(J):\n a_1d[indices_1d[j]] = values_1d[j]\n\n Equivalently, eliminating the inner loop, the last two lines would be::\n\n a_1d[indices_1d] = values_1d\n\n See Also\n --------\n take_along_axis :\n Take values from the input array by matching 1d index and data slices\n\n Examples\n --------\n\n For this sample array\n\n >>> a = np.array([[10, 30, 20], [60, 40, 50]])\n\n We can replace the maximum values with:\n\n >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)\n >>> ai\n array([[1],\n [0]])\n >>> np.put_along_axis(a, ai, 99, axis=1)\n >>> a\n array([[10, 99, 20],\n [99, 40, 50]])\n\n \"\"\"\n # normalize inputs\n if axis is None:\n arr = arr.flat\n axis = 0\n arr_shape = (len(arr),) # flatiter has no .shape\n else:\n axis = normalize_axis_index(axis, arr.ndim)\n arr_shape = arr.shape\n\n # use the fancy index\n arr[_make_along_axis_idx(arr_shape, indices, axis)] = values\n\n\ndef _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):\n return (arr,)\n\n\n@array_function_dispatch(_apply_along_axis_dispatcher)\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n \"\"\"\n Apply a function to 1-D slices along the given axis.\n\n Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`\n is a 1-D slice of `arr` along `axis`.\n\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n f = func1d(arr[ii + s_[:,] + kk])\n Nj = f.shape\n for jj in ndindex(Nj):\n out[ii + jj + kk] = f[jj]\n\n Equivalently, eliminating the inner loop, this can be expressed as::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])\n\n Parameters\n ----------\n func1d : function (M,) -> (Nj...)\n This function should accept 1-D arrays. It is applied to 1-D\n slices of `arr` along the specified axis.\n axis : integer\n Axis along which `arr` is sliced.\n arr : ndarray (Ni..., M, Nk...)\n Input array.\n args : any\n Additional arguments to `func1d`.\n kwargs : any\n Additional named arguments to `func1d`.\n\n .. versionadded:: 1.9.0\n\n\n Returns\n -------\n out : ndarray (Ni..., Nj..., Nk...)\n The output array. The shape of `out` is identical to the shape of\n `arr`, except along the `axis` dimension. This axis is removed, and\n replaced with new dimensions equal to the shape of the return value\n of `func1d`. So if `func1d` returns a scalar `out` will have one\n fewer dimensions than `arr`.\n\n See Also\n --------\n apply_over_axes : Apply a function repeatedly over multiple axes.\n\n Examples\n --------\n >>> def my_func(a):\n ... \\\"\\\"\\\"Average first and last element of a 1-D array\\\"\\\"\\\"\n ... 
return (a[0] + a[-1]) * 0.5\n >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])\n >>> np.apply_along_axis(my_func, 0, b)\n array([4., 5., 6.])\n >>> np.apply_along_axis(my_func, 1, b)\n array([2., 5., 8.])\n\n For a function that returns a 1D array, the number of dimensions in\n `outarr` is the same as `arr`.\n\n >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])\n >>> np.apply_along_axis(sorted, 1, b)\n array([[1, 7, 8],\n [3, 4, 9],\n [2, 5, 6]])\n\n For a function that returns a higher dimensional array, those dimensions\n are inserted in place of the `axis` dimension.\n\n >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])\n >>> np.apply_along_axis(np.diag, -1, b)\n array([[[1, 0, 0],\n [0, 2, 0],\n [0, 0, 3]],\n [[4, 0, 0],\n [0, 5, 0],\n [0, 0, 6]],\n [[7, 0, 0],\n [0, 8, 0],\n [0, 0, 9]]])\n \"\"\"\n # handle negative axes\n arr = asanyarray(arr)\n nd = arr.ndim\n axis = normalize_axis_index(axis, nd)\n\n # arr, with the iteration axis at the end\n in_dims = list(range(nd))\n inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])\n\n # compute indices for the iteration axes, and append a trailing ellipsis to\n # prevent 0d arrays decaying to scalars, which fixes gh-8642\n inds = ndindex(inarr_view.shape[:-1])\n inds = (ind + (Ellipsis,) for ind in inds)\n\n # invoke the function on the first item\n try:\n ind0 = next(inds)\n except StopIteration:\n raise ValueError('Cannot apply_along_axis when any iteration dimensions are 0')\n res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))\n\n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n # laid out so that each write is contiguous.\n # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])\n buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)\n\n # permutation of axes such that out = buff.transpose(buff_permute)\n buff_dims = list(range(buff.ndim))\n buff_permute = (\n buff_dims[0 : axis] +\n buff_dims[buff.ndim-res.ndim : buff.ndim] +\n buff_dims[axis : buff.ndim-res.ndim]\n )\n\n # matrices have a nasty __array_prepare__ and __array_wrap__\n if not isinstance(res, matrix):\n buff = res.__array_prepare__(buff)\n\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))\n\n if not isinstance(res, matrix):\n # wrap the array, to preserve subclasses\n buff = res.__array_wrap__(buff)\n\n # finally, rotate the inserted axes back to where they belong\n return transpose(buff, buff_permute)\n\n else:\n # matrices have to be transposed first, because they collapse dimensions!\n out_arr = transpose(buff, buff_permute)\n return res.__array_wrap__(out_arr)\n\n\ndef _apply_over_axes_dispatcher(func, a, axes):\n return (a,)\n\n\n@array_function_dispatch(_apply_over_axes_dispatcher)\ndef apply_over_axes(func, a, axes):\n \"\"\"\n Apply a function repeatedly over multiple axes.\n\n `func` is called as `res = func(a, axis)`, where `axis` is the first\n element of `axes`. The result `res` of the function call must have\n either the same dimensions as `a` or one less dimension. If `res`\n has one less dimension than `a`, a dimension is inserted before\n `axis`. 
The call to `func` is then repeated for each axis in `axes`,\n with `res` as the first argument.\n\n Parameters\n ----------\n func : function\n This function must take two arguments, `func(a, axis)`.\n a : array_like\n Input array.\n axes : array_like\n Axes over which `func` is applied; the elements must be integers.\n\n Returns\n -------\n apply_over_axis : ndarray\n The output array. The number of dimensions is the same as `a`,\n but the shape can be different. This depends on whether `func`\n changes the shape of its output with respect to its input.\n\n See Also\n --------\n apply_along_axis :\n Apply a function to 1-D slices of an array along the given axis.\n\n Notes\n ------\n This function is equivalent to tuple axis arguments to reorderable ufuncs\n with keepdims=True. Tuple axis arguments to ufuncs have been available since\n version 1.7.0.\n\n Examples\n --------\n >>> a = np.arange(24).reshape(2,3,4)\n >>> a\n array([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]])\n\n Sum over axes 0 and 2. The result has same number of dimensions\n as the original array:\n\n >>> np.apply_over_axes(np.sum, a, [0,2])\n array([[[ 60],\n [ 92],\n [124]]])\n\n Tuple axis arguments to ufuncs are equivalent:\n\n >>> np.sum(a, axis=(0,2), keepdims=True)\n array([[[ 60],\n [ 92],\n [124]]])\n\n \"\"\"\n val = asarray(a)\n N = a.ndim\n if array(axes).ndim == 0:\n axes = (axes,)\n for axis in axes:\n if axis < 0:\n axis = N + axis\n args = (val, axis)\n res = func(*args)\n if res.ndim == val.ndim:\n val = res\n else:\n res = expand_dims(res, axis)\n if res.ndim == val.ndim:\n val = res\n else:\n raise ValueError(\"function is not returning \"\n \"an array of the correct shape\")\n return val\n\n\ndef _expand_dims_dispatcher(a, axis):\n return (a,)\n\n\n@array_function_dispatch(_expand_dims_dispatcher)\ndef expand_dims(a, axis):\n \"\"\"\n Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded\n array shape.\n\n .. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor\n ``axis > a.ndim`` raised errors or put the new axis where documented.\n Those axis values are now deprecated and will raise an AxisError in the\n future.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int\n Position in the expanded axes where the new axis is placed.\n\n Returns\n -------\n res : ndarray\n View of `a` with the number of dimensions increased by one.\n\n See Also\n --------\n squeeze : The inverse operation, removing singleton dimensions\n reshape : Insert, remove, and combine dimensions, and resize existing ones\n doc.indexing, atleast_1d, atleast_2d, atleast_3d\n\n Examples\n --------\n >>> x = np.array([1,2])\n >>> x.shape\n (2,)\n\n The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:\n\n >>> y = np.expand_dims(x, axis=0)\n >>> y\n array([[1, 2]])\n >>> y.shape\n (1, 2)\n\n >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]\n >>> y\n array([[1],\n [2]])\n >>> y.shape\n (2, 1)\n\n Note that some examples may use ``None`` instead of ``np.newaxis``. 
These\n are the same objects:\n\n >>> np.newaxis is None\n True\n\n \"\"\"\n if isinstance(a, matrix):\n a = asarray(a)\n else:\n a = asanyarray(a)\n\n shape = a.shape\n if axis > a.ndim or axis < -a.ndim - 1:\n # 2017-05-17, 1.13.0\n warnings.warn(\"Both axis > a.ndim and axis < -a.ndim - 1 are \"\n \"deprecated and will raise an AxisError in the future.\",\n DeprecationWarning, stacklevel=2)\n # When the deprecation period expires, delete this if block,\n if axis < 0:\n axis = axis + a.ndim + 1\n # and uncomment the following line.\n # axis = normalize_axis_index(axis, a.ndim + 1)\n return a.reshape(shape[:axis] + (1,) + shape[axis:])\n\n\nrow_stack = vstack\n\n\ndef _column_stack_dispatcher(tup):\n return _arrays_for_stack_dispatcher(tup)\n\n\n@array_function_dispatch(_column_stack_dispatcher)\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Parameters\n ----------\n tup : sequence of 1-D or 2-D arrays.\n Arrays to stack. All of them must have the same first dimension.\n\n Returns\n -------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n stack, hstack, vstack, concatenate\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1, 2],\n [2, 3],\n [3, 4]])\n\n \"\"\"\n arrays = []\n for v in tup:\n arr = array(v, copy=False, subok=True)\n if arr.ndim < 2:\n arr = array(arr, copy=False, subok=True, ndmin=2).T\n arrays.append(arr)\n return _nx.concatenate(arrays, 1)\n\n\ndef _dstack_dispatcher(tup):\n return _arrays_for_stack_dispatcher(tup)\n\n\n@array_function_dispatch(_dstack_dispatcher)\ndef dstack(tup):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n\n This is equivalent to concatenation along the third axis after 2-D arrays\n of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape\n `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by\n `dsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of arrays\n The arrays must have the same shape along all but the third axis.\n 1-D or 2-D arrays must have the same shape.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 3-D.\n\n See Also\n --------\n stack : Join a sequence of arrays along a new axis.\n vstack : Stack along first axis.\n hstack : Stack along second axis.\n concatenate : Join a sequence of arrays along an existing axis.\n dsplit : Split array along third axis.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n\n \"\"\"\n return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)\n\n\ndef _replace_zero_by_x_arrays(sub_arys):\n for i in range(len(sub_arys)):\n if _nx.ndim(sub_arys[i]) == 0:\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n return sub_arys\n\n\ndef _array_split_dispatcher(ary, indices_or_sections, axis=None):\n return (ary, indices_or_sections)\n\n\n@array_function_dispatch(_array_split_dispatcher)\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Please refer to the ``split`` documentation. The only difference\n between these functions is that ``array_split`` allows\n `indices_or_sections` to be an integer that does *not* equally\n divide the axis. For an array of length l that should be split\n into n sections, it returns l % n sub-arrays of size l//n + 1\n and the rest of size l//n.\n\n See Also\n --------\n split : Split array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(8.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]\n\n >>> x = np.arange(7.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]\n\n \"\"\"\n try:\n Ntotal = ary.shape[axis]\n except AttributeError:\n Ntotal = len(ary)\n try:\n # handle array case.\n Nsections = len(indices_or_sections) + 1\n div_points = [0] + list(indices_or_sections) + [Ntotal]\n except TypeError:\n # indices_or_sections is a scalar, not an array.\n Nsections = int(indices_or_sections)\n if Nsections <= 0:\n raise ValueError('number sections must be larger than 0.')\n Neach_section, extras = divmod(Ntotal, Nsections)\n section_sizes = ([0] +\n extras * [Neach_section+1] +\n (Nsections-extras) * [Neach_section])\n div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()\n\n sub_arys = []\n sary = _nx.swapaxes(ary, axis, 0)\n for i in range(Nsections):\n st = div_points[i]\n end = div_points[i + 1]\n sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))\n\n return sub_arys\n\n\ndef _split_dispatcher(ary, indices_or_sections, axis=None):\n return (ary, indices_or_sections)\n\n\n@array_function_dispatch(_split_dispatcher)\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D array\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along 
`axis`. If such a split is not possible,\n an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n\n See Also\n --------\n array_split : Split an array into multiple sub-arrays of equal or\n near-equal size. Does not raise an exception if\n an equal division cannot be made.\n hsplit : Split array into multiple sub-arrays horizontally (column-wise).\n vsplit : Split array into multiple sub-arrays vertically (row wise).\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n\n Examples\n --------\n >>> x = np.arange(9.0)\n >>> np.split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n\n >>> x = np.arange(8.0)\n >>> np.split(x, [3, 5, 6, 10])\n [array([0., 1., 2.]),\n array([3., 4.]),\n array([5.]),\n array([6., 7.]),\n array([], dtype=float64)]\n\n \"\"\"\n try:\n len(indices_or_sections)\n except TypeError:\n sections = indices_or_sections\n N = ary.shape[axis]\n if N % sections:\n raise ValueError(\n 'array split does not result in an equal division')\n res = array_split(ary, indices_or_sections, axis)\n return res\n\n\ndef _hvdsplit_dispatcher(ary, indices_or_sections):\n return (ary, indices_or_sections)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)\ndef hsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the `split` documentation. 
`hsplit` is equivalent\n to `split` with ``axis=1``, the array is always split along the second\n axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float64)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[0., 1.]],\n [[4., 5.]]]),\n array([[[2., 3.]],\n [[6., 7.]]])]\n\n \"\"\"\n if _nx.ndim(ary) == 0:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n if ary.ndim > 1:\n return split(ary, indices_or_sections, 1)\n else:\n return split(ary, indices_or_sections, 0)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)\ndef vsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays vertically (row-wise).\n\n Please refer to the ``split`` documentation. ``vsplit`` is equivalent\n to ``split`` with `axis=0` (default), the array is always split along the\n first axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])]\n >>> np.vsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)]\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[0., 1.],\n [2., 3.]]]), array([[[4., 5.],\n [6., 7.]]])]\n\n \"\"\"\n if _nx.ndim(ary) < 2:\n raise ValueError('vsplit only works on arrays of 2 or more dimensions')\n return split(ary, indices_or_sections, 0)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)\ndef dsplit(ary, indices_or_sections):\n \"\"\"\n Split array into multiple sub-arrays along the 3rd axis (depth).\n\n Please refer to the `split` documentation. 
`dsplit` is equivalent\n to `split` with ``axis=2``, the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [12., 13.]]]), array([[[ 2., 3.],\n [ 6., 7.]],\n [[10., 11.],\n [14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[11.],\n [15.]]]),\n array([], shape=(2, 2, 0), dtype=float64)]\n \"\"\"\n if _nx.ndim(ary) < 3:\n raise ValueError('dsplit only works on arrays of 3 or more dimensions')\n return split(ary, indices_or_sections, 2)\n\ndef get_array_prepare(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_prepare__) for i, x in enumerate(args)\n if hasattr(x, '__array_prepare__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\ndef get_array_wrap(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_wrap__) for i, x in enumerate(args)\n if hasattr(x, '__array_wrap__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\n\ndef _kron_dispatcher(a, b):\n return (a, b)\n\n\n@array_function_dispatch(_kron_dispatcher)\ndef kron(a, b):\n \"\"\"\n Kronecker product of two arrays.\n\n Computes the Kronecker product, a composite array made of blocks of the\n second array scaled by the first.\n\n Parameters\n ----------\n a, b : array_like\n\n Returns\n -------\n out : ndarray\n\n See Also\n --------\n outer : The outer product\n\n Notes\n -----\n The function assumes that the number of dimensions of `a` and `b`\n are the same, if necessary prepending the smallest with ones.\n If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,\n the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.\n The elements are products of elements from `a` and `b`, organized\n explicitly by::\n\n kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]\n\n where::\n\n kt = it * st + jt, t = 0,...,N\n\n In the common 2-D case (N=1), the block structure can be visualized::\n\n [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],\n [ ... ... ],\n [ a[-1,0]*b, a[-1,1]*b, ... 
, a[-1,-1]*b ]]\n\n\n Examples\n --------\n >>> np.kron([1,10,100], [5,6,7])\n array([ 5, 6, 7, ..., 500, 600, 700])\n >>> np.kron([5,6,7], [1,10,100])\n array([ 5, 50, 500, ..., 7, 70, 700])\n\n >>> np.kron(np.eye(2), np.ones((2,2)))\n array([[1., 1., 0., 0.],\n [1., 1., 0., 0.],\n [0., 0., 1., 1.],\n [0., 0., 1., 1.]])\n\n >>> a = np.arange(100).reshape((2,5,2,5))\n >>> b = np.arange(24).reshape((2,3,4))\n >>> c = np.kron(a,b)\n >>> c.shape\n (2, 10, 6, 20)\n >>> I = (1,3,0,2)\n >>> J = (0,2,1)\n >>> J1 = (0,) + J # extend to ndim=4\n >>> S1 = (1,) + b.shape\n >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))\n >>> c[K] == a[I]*b[J]\n True\n\n \"\"\"\n b = asanyarray(b)\n a = array(a, copy=False, subok=True, ndmin=b.ndim)\n ndb, nda = b.ndim, a.ndim\n if (nda == 0 or ndb == 0):\n return _nx.multiply(a, b)\n as_ = a.shape\n bs = b.shape\n if not a.flags.contiguous:\n a = reshape(a, as_)\n if not b.flags.contiguous:\n b = reshape(b, bs)\n nd = ndb\n if (ndb != nda):\n if (ndb > nda):\n as_ = (1,)*(ndb-nda) + as_\n else:\n bs = (1,)*(nda-ndb) + bs\n nd = nda\n result = outer(a, b).reshape(as_+bs)\n axis = nd-1\n for _ in range(nd):\n result = concatenate(result, axis=axis)\n wrapper = get_array_prepare(a, b)\n if wrapper is not None:\n result = wrapper(result)\n wrapper = get_array_wrap(a, b)\n if wrapper is not None:\n result = wrapper(result)\n return result\n\n\ndef _tile_dispatcher(A, reps):\n return (A, reps)\n\n\n@array_function_dispatch(_tile_dispatcher)\ndef tile(A, reps):\n \"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. 
If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Note : Although tile may be used for broadcasting, it is strongly\n recommended to use numpy's broadcasting operations and functions.\n\n Parameters\n ----------\n A : array_like\n The input array.\n reps : array_like\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n See Also\n --------\n repeat : Repeat elements of an array.\n broadcast_to : Broadcast an array to a new shape\n\n Examples\n --------\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0, 1, 2, 0, 1, 2])\n >>> np.tile(a, (2, 2))\n array([[0, 1, 2, 0, 1, 2],\n [0, 1, 2, 0, 1, 2]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0, 1, 2, 0, 1, 2]],\n [[0, 1, 2, 0, 1, 2]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n >>> np.tile(b, (2, 1))\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> c = np.array([1,2,3,4])\n >>> np.tile(c,(4,1))\n array([[1, 2, 3, 4],\n [1, 2, 3, 4],\n [1, 2, 3, 4],\n [1, 2, 3, 4]])\n \"\"\"\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n d = len(tup)\n if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):\n # Fixes the problem that the function does not make a copy if A is a\n # numpy array and the repetitions are 1 in all dimensions\n return _nx.array(A, copy=True, subok=True, ndmin=d)\n else:\n # Note that no copy of zero-sized arrays is made. However since they\n # have no data there is no risk of an inadvertent overwrite.\n c = _nx.array(A, copy=False, subok=True, ndmin=d)\n if (d < c.ndim):\n tup = (1,)*(c.ndim-d) + tup\n shape_out = tuple(s*t for s, t in zip(c.shape, tup))\n n = c.size\n if n > 0:\n for dim_in, nrep in zip(c.shape, tup):\n if nrep != 1:\n c = c.reshape(-1, n).repeat(nrep, 0)\n n //= dim_in\n return c.reshape(shape_out)\n", "path": "numpy/lib/shape_base.py" } ]
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index ac2a2560498b..8ebe7a695f22 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -532,8 +532,7 @@ def expand_dims(a, axis): Returns ------- res : ndarray - Output array. The number of dimensions is one greater than that of - the input array. + View of `a` with the number of dimensions increased by one. See Also --------
medtagger__MedTagger-442
Getting random scan for non-existing task key results in 500
## Current Behavior
Providing a non-existing task key results in a 500 HTTP code.
## Expected Behavior
The backend should handle this situation appropriately and return a 404 HTTP code.
## Steps to Reproduce the Problem
1. Perform a GET `scans/random?task=<task_key>` and provide a non-existing key.
[ { "content": "\"\"\"Module responsible for definition of TaskRepository.\"\"\"\nfrom typing import List\n\nfrom medtagger.database import db_session\nfrom medtagger.database.models import Task, LabelTag, Dataset\nfrom medtagger.exceptions import InternalErrorException\n\n\ndef get_all_tasks(include_disabled: bool = False) -> List[Task]:\n \"\"\"Fetch all tasks from database ordered by key.\"\"\"\n query = Task.query\n if not include_disabled:\n query = query.filter(~Task.disabled)\n return query.order_by(Task.key).all()\n\n\ndef get_task_by_key(key: str) -> Task:\n \"\"\"Fetch Task from database.\n\n :param key: key for a Task\n :return: Task object\n \"\"\"\n with db_session() as session:\n task = session.query(Task).filter(Task.key == key).one()\n return task\n\n\ndef add_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:\n \"\"\"Add new Task to the database.\n\n :param key: key that will identify such Task\n :param name: name that will be used in the Use Interface for such Task\n :param image_path: path to the image that represents such Task (used in User Interface)\n :param datasets_keys: Keys of Datasets that Task takes Scans from\n :param tags: Label Tags that will be created and assigned to Task\n :return: Task object\n \"\"\"\n with db_session() as session:\n task = Task(key, name, image_path)\n datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore\n task.datasets = datasets\n task.available_tags = tags\n session.add(task)\n return task\n\n\ndef assign_label_tag(tag: LabelTag, task_key: str) -> None:\n \"\"\"Assign existing Label Tag to Task.\n\n :param tag: tag that should be assigned to Task\n :param task_key: key that will identify such Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n task.available_tags.append(tag)\n task.save()\n\n\ndef unassign_label_tag(tag: LabelTag, task_key: str) -> None:\n \"\"\"Unassign Label Tag from Task.\n\n :param tag: tag that should be unassigned from Task\n :param task_key: key that will identify such Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n task.available_tags.remove(tag)\n task.save()\n\n\ndef update(task_key: str, name: str = None, image_path: str = None, datasets_keys: List[str] = None) -> Task:\n \"\"\"Update Datasets where this Task will be available.\n\n :param task_key: key that will identify such Task\n :param name: (optional) new name for such Task\n :param image_path: (optional) new path to the image which shows on the UI\n :param datasets_keys: (optional) keys of Datasets which should have this Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n if name:\n task.name = name\n if image_path:\n task.image_path = image_path\n if datasets_keys:\n datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore\n task.datasets = datasets\n return task\n\n\ndef disable(task_key: str) -> None:\n \"\"\"Disable existing Task.\"\"\"\n disabling_query = Task.query.filter(Task.key == task_key)\n updated = disabling_query.update({'disabled': True}, synchronize_session='fetch')\n if not updated:\n raise InternalErrorException(f'Task \"{task_key}\" was not disabled due to unknown database error.')\n\n\ndef enable(task_key: str) -> None:\n \"\"\"Enable existing Task.\"\"\"\n enabling_query = Task.query.filter(Task.key == task_key)\n updated = enabling_query.update({'disabled': False}, synchronize_session='fetch')\n if 
not updated:\n raise InternalErrorException(f'Task \"{task_key}\" was not enabled due to unknown database error.')\n", "path": "backend/medtagger/repositories/tasks.py" } ]
[ { "content": "\"\"\"Module responsible for definition of TaskRepository.\"\"\"\nfrom typing import List\n\nfrom medtagger.database import db_session\nfrom medtagger.database.models import Task, LabelTag, Dataset\nfrom medtagger.exceptions import InternalErrorException\n\n\ndef get_all_tasks(include_disabled: bool = False) -> List[Task]:\n \"\"\"Fetch all tasks from database ordered by key.\"\"\"\n query = Task.query\n if not include_disabled:\n query = query.filter(~Task.disabled)\n return query.order_by(Task.key).all()\n\n\ndef get_task_by_key(key: str) -> Task:\n \"\"\"Fetch Task from database.\n\n :param key: key for a Task\n :return: Task object\n \"\"\"\n with db_session() as session:\n task = session.query(Task).filter(Task.key == key).first()\n return task\n\n\ndef add_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:\n \"\"\"Add new Task to the database.\n\n :param key: key that will identify such Task\n :param name: name that will be used in the Use Interface for such Task\n :param image_path: path to the image that represents such Task (used in User Interface)\n :param datasets_keys: Keys of Datasets that Task takes Scans from\n :param tags: Label Tags that will be created and assigned to Task\n :return: Task object\n \"\"\"\n with db_session() as session:\n task = Task(key, name, image_path)\n datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore\n task.datasets = datasets\n task.available_tags = tags\n session.add(task)\n return task\n\n\ndef assign_label_tag(tag: LabelTag, task_key: str) -> None:\n \"\"\"Assign existing Label Tag to Task.\n\n :param tag: tag that should be assigned to Task\n :param task_key: key that will identify such Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n task.available_tags.append(tag)\n task.save()\n\n\ndef unassign_label_tag(tag: LabelTag, task_key: str) -> None:\n \"\"\"Unassign Label Tag from Task.\n\n :param tag: tag that should be unassigned from Task\n :param task_key: key that will identify such Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n task.available_tags.remove(tag)\n task.save()\n\n\ndef update(task_key: str, name: str = None, image_path: str = None, datasets_keys: List[str] = None) -> Task:\n \"\"\"Update Datasets where this Task will be available.\n\n :param task_key: key that will identify such Task\n :param name: (optional) new name for such Task\n :param image_path: (optional) new path to the image which shows on the UI\n :param datasets_keys: (optional) keys of Datasets which should have this Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n if name:\n task.name = name\n if image_path:\n task.image_path = image_path\n if datasets_keys:\n datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore\n task.datasets = datasets\n return task\n\n\ndef disable(task_key: str) -> None:\n \"\"\"Disable existing Task.\"\"\"\n disabling_query = Task.query.filter(Task.key == task_key)\n updated = disabling_query.update({'disabled': True}, synchronize_session='fetch')\n if not updated:\n raise InternalErrorException(f'Task \"{task_key}\" was not disabled due to unknown database error.')\n\n\ndef enable(task_key: str) -> None:\n \"\"\"Enable existing Task.\"\"\"\n enabling_query = Task.query.filter(Task.key == task_key)\n updated = enabling_query.update({'disabled': False}, synchronize_session='fetch')\n if 
not updated:\n raise InternalErrorException(f'Task \"{task_key}\" was not enabled due to unknown database error.')\n", "path": "backend/medtagger/repositories/tasks.py" } ]
diff --git a/backend/medtagger/repositories/tasks.py b/backend/medtagger/repositories/tasks.py index 71926114..12aa8fb0 100644 --- a/backend/medtagger/repositories/tasks.py +++ b/backend/medtagger/repositories/tasks.py @@ -21,7 +21,7 @@ def get_task_by_key(key: str) -> Task: :return: Task object """ with db_session() as session: - task = session.query(Task).filter(Task.key == key).one() + task = session.query(Task).filter(Task.key == key).first() return task
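The one-line change above works because SQLAlchemy's `Query.one()` raises `NoResultFound` when no row matches, which propagates as an unhandled exception and produces the 500 response described in the issue, while `Query.first()` returns `None`, letting the caller detect the missing Task and answer with 404. The MedTagger endpoint code is not part of this record, so the sketch below is only an illustration of that pattern; `get_random_scan` and `NotFoundException` are hypothetical names, not the project's actual API.

```python
# Minimal sketch (assumed names) of how a caller can map the None returned by
# get_task_by_key() -- now using .first() -- onto an HTTP 404 response.
from medtagger.repositories import tasks as TasksRepository  # module path taken from the record above


class NotFoundException(Exception):
    """Hypothetical exception that the API layer would translate into HTTP 404."""


def get_random_scan(task_key: str):
    task = TasksRepository.get_task_by_key(task_key)  # Task instance or None
    if task is None:
        # With .one() this branch was never reached; NoResultFound surfaced as a 500.
        raise NotFoundException(f'Task "{task_key}" does not exist.')
    return task  # ...continue with selecting a random Scan for this Task
```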
pwr-Solaar__Solaar-2003
Solaar exits with RC 1 when terminated
**Information**
<!-- Make sure that your issue is not one of the known issues in the Solaar documentation at https://pwr-solaar.github.io/Solaar/ -->
<!-- Do not bother opening an issue for a version older than 1.1.0. Upgrade to the latest version and see if your issue persists. -->
<!-- If you are not running the current version of Solaar, strongly consider upgrading to the newest version. -->
- Solaar version (`solaar --version` or `git describe --tags` if cloned from this repository): solaar 1.1.8+dfsg-2
- Distribution: Debian testing
- Kernel version (ex. `uname -srmo`): `Linux 6.1.0-3-amd64 x86_64 GNU/Linux`
- Output of `solaar show`:
<details>

```
```

</details>

- Contents of `~/.config/solaar/config.yaml` (or `~/.config/solaar/config.json` if `~/.config/solaar/config.yaml` not present):
<details>

```
CONTENTS HERE
```

</details>

- Errors or warnings from Solaar:
<!-- Under normal operation Solaar keeps a log of warning and error messages in ~/.tmp while it is running as a file starting with 'Solaar'. If this file is not available or does not have useful information you can run Solaar as `solaar -dd`, after killing any running Solaar processes to have Solaar log informational, warning, and error messages to stdout. -->

**Describe the bug**
When terminated, solaar exits with RC 1. This results in a failed unit if systemd is managing XDG autostart. No log warnings or stderr messages are produced.

**To Reproduce**
Steps to reproduce the behavior:
1. run `solaar ; echo $?`
2. run `killall solaar`
3. see RC 1 printed

Workaround:
```
cat << EOF > ~/.config/systemd/user/[email protected]/exit-tweak.conf
[Service]
SuccessExitStatus=0 1
EOF
systemctl --user daemon-reload
```
[ { "content": "#!/usr/bin/env python3\n# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport importlib\nimport logging\nimport os.path\nimport signal\nimport sys\nimport tempfile\n\nfrom logging import INFO as _INFO\nfrom logging import WARNING as _WARNING\n\nimport solaar.cli as _cli\nimport solaar.i18n as _i18n\n\nfrom solaar import NAME, __version__\n\n_log = logging.getLogger(__name__)\n\n#\n#\n#\n\n\ndef _require(module, os_package, gi=None, gi_package=None, gi_version=None):\n try:\n if gi is not None:\n gi.require_version(gi_package, gi_version)\n return importlib.import_module(module)\n except (ImportError, ValueError):\n sys.exit('%s: missing required system package %s' % (NAME, os_package))\n\n\nbattery_icons_style = 'regular'\ntemp = tempfile.NamedTemporaryFile(prefix='Solaar_', mode='w', delete=True)\n\n\ndef _parse_arguments():\n import argparse\n arg_parser = argparse.ArgumentParser(\n prog=NAME.lower(), epilog='For more information see https://pwr-solaar.github.io/Solaar'\n )\n arg_parser.add_argument(\n '-d',\n '--debug',\n action='count',\n default=0,\n help='print logging messages, for debugging purposes (may be repeated for extra verbosity)'\n )\n arg_parser.add_argument(\n '-D',\n '--hidraw',\n action='store',\n dest='hidraw_path',\n metavar='PATH',\n help='unifying receiver to use; the first detected receiver if unspecified. 
Example: /dev/hidraw2'\n )\n arg_parser.add_argument('--restart-on-wake-up', action='store_true', help='restart Solaar on sleep wake-up (experimental)')\n arg_parser.add_argument(\n '-w', '--window', choices=('show', 'hide', 'only'), help='start with window showing / hidden / only (no tray icon)'\n )\n arg_parser.add_argument(\n '-b',\n '--battery-icons',\n choices=('regular', 'symbolic', 'solaar'),\n help='prefer regular battery / symbolic battery / solaar icons'\n )\n arg_parser.add_argument('--tray-icon-size', type=int, help='explicit size for tray icons')\n arg_parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)\n arg_parser.add_argument('--help-actions', action='store_true', help='print help for the optional actions')\n arg_parser.add_argument('action', nargs=argparse.REMAINDER, choices=_cli.actions, help='optional actions to perform')\n\n args = arg_parser.parse_args()\n\n if args.help_actions:\n _cli.print_help()\n return\n\n if args.window is None:\n args.window = 'show' # default behaviour is to show main window\n\n global battery_icons_style\n battery_icons_style = args.battery_icons if args.battery_icons is not None else 'regular'\n global tray_icon_size\n tray_icon_size = args.tray_icon_size\n\n log_format = '%(asctime)s,%(msecs)03d %(levelname)8s [%(threadName)s] %(name)s: %(message)s'\n log_level = logging.ERROR - 10 * args.debug\n logging.getLogger('').setLevel(min(log_level, logging.WARNING))\n file_handler = logging.StreamHandler(temp)\n file_handler.setLevel(max(min(log_level, logging.WARNING), logging.INFO))\n file_handler.setFormatter(logging.Formatter(log_format))\n logging.getLogger('').addHandler(file_handler)\n if args.debug > 0:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(log_format))\n stream_handler.setLevel(log_level)\n logging.getLogger('').addHandler(stream_handler)\n\n if not args.action:\n if _log.isEnabledFor(logging.INFO):\n logging.info('language %s (%s), translations path %s', _i18n.language, _i18n.encoding, _i18n.path)\n\n return args\n\n\n# On first SIGINT, dump threads to stderr; on second, exit\ndef _handlesig(signl, stack):\n import faulthandler\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n\n if signl == int(signal.SIGINT):\n if _log.isEnabledFor(_INFO):\n faulthandler.dump_traceback()\n sys.exit('%s: exit due to keyboard interrupt' % (NAME.lower()))\n else:\n sys.exit('')\n\n\ndef main():\n _require('pyudev', 'python3-pyudev')\n\n args = _parse_arguments()\n if not args:\n return\n if args.action:\n # if any argument, run comandline and exit\n return _cli.run(args.action, args.hidraw_path)\n\n gi = _require('gi', 'python3-gi (in Ubuntu) or python3-gobject (in Fedora)')\n _require('gi.repository.Gtk', 'gir1.2-gtk-3.0', gi, 'Gtk', '3.0')\n\n # handle ^C in console\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGINT, _handlesig)\n signal.signal(signal.SIGTERM, _handlesig)\n\n udev_file = '42-logitech-unify-permissions.rules'\n if _log.isEnabledFor(_WARNING) \\\n and not os.path.isfile('/etc/udev/rules.d/' + udev_file) \\\n and not os.path.isfile('/usr/lib/udev/rules.d/' + udev_file) \\\n and not os.path.isfile('/usr/local/lib/udev/rules.d/' + udev_file):\n _log.warning('Solaar udev file not found in expected location')\n _log.warning('See https://pwr-solaar.github.io/Solaar/installation for more information')\n try:\n import solaar.listener as listener\n import solaar.ui as ui\n\n 
listener.setup_scanner(ui.status_changed, ui.error_dialog)\n\n import solaar.upower as _upower\n if args.restart_on_wake_up:\n _upower.watch(listener.start_all, listener.stop_all)\n else:\n _upower.watch(lambda: listener.ping_all(True))\n\n import solaar.configuration as _configuration\n _configuration.defer_saves = True # allow configuration saves to be deferred\n\n # main UI event loop\n ui.run_loop(listener.start_all, listener.stop_all, args.window != 'only', args.window != 'hide')\n except Exception:\n from traceback import format_exc\n sys.exit('%s: error: %s' % (NAME.lower(), format_exc()))\n\n temp.close()\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/solaar/gtk.py" } ]
[ { "content": "#!/usr/bin/env python3\n# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport importlib\nimport logging\nimport os.path\nimport signal\nimport sys\nimport tempfile\n\nfrom logging import INFO as _INFO\nfrom logging import WARNING as _WARNING\n\nimport solaar.cli as _cli\nimport solaar.i18n as _i18n\n\nfrom solaar import NAME, __version__\n\n_log = logging.getLogger(__name__)\n\n#\n#\n#\n\n\ndef _require(module, os_package, gi=None, gi_package=None, gi_version=None):\n try:\n if gi is not None:\n gi.require_version(gi_package, gi_version)\n return importlib.import_module(module)\n except (ImportError, ValueError):\n sys.exit('%s: missing required system package %s' % (NAME, os_package))\n\n\nbattery_icons_style = 'regular'\ntemp = tempfile.NamedTemporaryFile(prefix='Solaar_', mode='w', delete=True)\n\n\ndef _parse_arguments():\n import argparse\n arg_parser = argparse.ArgumentParser(\n prog=NAME.lower(), epilog='For more information see https://pwr-solaar.github.io/Solaar'\n )\n arg_parser.add_argument(\n '-d',\n '--debug',\n action='count',\n default=0,\n help='print logging messages, for debugging purposes (may be repeated for extra verbosity)'\n )\n arg_parser.add_argument(\n '-D',\n '--hidraw',\n action='store',\n dest='hidraw_path',\n metavar='PATH',\n help='unifying receiver to use; the first detected receiver if unspecified. 
Example: /dev/hidraw2'\n )\n arg_parser.add_argument('--restart-on-wake-up', action='store_true', help='restart Solaar on sleep wake-up (experimental)')\n arg_parser.add_argument(\n '-w', '--window', choices=('show', 'hide', 'only'), help='start with window showing / hidden / only (no tray icon)'\n )\n arg_parser.add_argument(\n '-b',\n '--battery-icons',\n choices=('regular', 'symbolic', 'solaar'),\n help='prefer regular battery / symbolic battery / solaar icons'\n )\n arg_parser.add_argument('--tray-icon-size', type=int, help='explicit size for tray icons')\n arg_parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)\n arg_parser.add_argument('--help-actions', action='store_true', help='print help for the optional actions')\n arg_parser.add_argument('action', nargs=argparse.REMAINDER, choices=_cli.actions, help='optional actions to perform')\n\n args = arg_parser.parse_args()\n\n if args.help_actions:\n _cli.print_help()\n return\n\n if args.window is None:\n args.window = 'show' # default behaviour is to show main window\n\n global battery_icons_style\n battery_icons_style = args.battery_icons if args.battery_icons is not None else 'regular'\n global tray_icon_size\n tray_icon_size = args.tray_icon_size\n\n log_format = '%(asctime)s,%(msecs)03d %(levelname)8s [%(threadName)s] %(name)s: %(message)s'\n log_level = logging.ERROR - 10 * args.debug\n logging.getLogger('').setLevel(min(log_level, logging.WARNING))\n file_handler = logging.StreamHandler(temp)\n file_handler.setLevel(max(min(log_level, logging.WARNING), logging.INFO))\n file_handler.setFormatter(logging.Formatter(log_format))\n logging.getLogger('').addHandler(file_handler)\n if args.debug > 0:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(log_format))\n stream_handler.setLevel(log_level)\n logging.getLogger('').addHandler(stream_handler)\n\n if not args.action:\n if _log.isEnabledFor(logging.INFO):\n logging.info('language %s (%s), translations path %s', _i18n.language, _i18n.encoding, _i18n.path)\n\n return args\n\n\n# On first SIGINT, dump threads to stderr; on second, exit\ndef _handlesig(signl, stack):\n import faulthandler\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n\n if signl == int(signal.SIGINT):\n if _log.isEnabledFor(_INFO):\n faulthandler.dump_traceback()\n sys.exit('%s: exit due to keyboard interrupt' % (NAME.lower()))\n else:\n sys.exit(0)\n\n\ndef main():\n _require('pyudev', 'python3-pyudev')\n\n args = _parse_arguments()\n if not args:\n return\n if args.action:\n # if any argument, run comandline and exit\n return _cli.run(args.action, args.hidraw_path)\n\n gi = _require('gi', 'python3-gi (in Ubuntu) or python3-gobject (in Fedora)')\n _require('gi.repository.Gtk', 'gir1.2-gtk-3.0', gi, 'Gtk', '3.0')\n\n # handle ^C in console\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGINT, _handlesig)\n signal.signal(signal.SIGTERM, _handlesig)\n\n udev_file = '42-logitech-unify-permissions.rules'\n if _log.isEnabledFor(_WARNING) \\\n and not os.path.isfile('/etc/udev/rules.d/' + udev_file) \\\n and not os.path.isfile('/usr/lib/udev/rules.d/' + udev_file) \\\n and not os.path.isfile('/usr/local/lib/udev/rules.d/' + udev_file):\n _log.warning('Solaar udev file not found in expected location')\n _log.warning('See https://pwr-solaar.github.io/Solaar/installation for more information')\n try:\n import solaar.listener as listener\n import solaar.ui as ui\n\n 
listener.setup_scanner(ui.status_changed, ui.error_dialog)\n\n import solaar.upower as _upower\n if args.restart_on_wake_up:\n _upower.watch(listener.start_all, listener.stop_all)\n else:\n _upower.watch(lambda: listener.ping_all(True))\n\n import solaar.configuration as _configuration\n _configuration.defer_saves = True # allow configuration saves to be deferred\n\n # main UI event loop\n ui.run_loop(listener.start_all, listener.stop_all, args.window != 'only', args.window != 'hide')\n except Exception:\n from traceback import format_exc\n sys.exit('%s: error: %s' % (NAME.lower(), format_exc()))\n\n temp.close()\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/solaar/gtk.py" } ]
diff --git a/docs/devices/MX Master 3 for Business B028.text b/docs/devices/MX Master 3 for Business B028.text index e5c4dad082..d0b465acbd 100644 --- a/docs/devices/MX Master 3 for Business B028.text +++ b/docs/devices/MX Master 3 for Business B028.text @@ -1,14 +1,14 @@ -Solaar version 1.1.5 +solaar version 1.1.8 - 2: MX Master 3 for Business + 1: MX Master 3 for Business Device path : None WPID : B028 Codename : MX Master 3 B Kind : mouse Protocol : HID++ 4.5 - Serial number: 12617690 + Serial number: 18F3413B Model ID: B02800000000 - Unit ID: 12617690 + Unit ID: 18F3413B Bootloader: BL1 41.00.B0009 Firmware: RBM 14.00.B0009 Other: @@ -20,29 +20,92 @@ Solaar version 1.1.5 Firmware: Bootloader BL1 41.00.B0009 B0281D13EFC0 Firmware: Firmware RBM 14.00.B0009 B0281D13EFC0 Firmware: Other - Unit ID: 12617690 Model ID: B02800000000 Transport IDs: {'btleid': 'B028'} + Unit ID: 18F3413B Model ID: B02800000000 Transport IDs: {'btleid': 'B028'} 3: DEVICE NAME {0005} V0 Name: MX Master 3 for Business Kind: mouse 4: WIRELESS DEVICE STATUS {1D4B} V0 - 5: RESET {0020} V0 + 5: CONFIG CHANGE {0020} V0 6: CRYPTO ID {0021} V1 7: DEVICE FRIENDLY NAME {0007} V0 Friendly Name: MX Master 3 B 8: UNIFIED BATTERY {1004} V2 - Battery: 80%, recharging. + Battery: 95%, discharging. 9: REPROG CONTROLS V4 {1B04} V5 Key/Button Actions (saved): {Left Button:Left Click, Right Button:Right Click, Middle Button:Mouse Middle Button, Back Button:Mouse Back Button, Forward Button:Mouse Forward Button, Mouse Gesture Button:Gesture Button Navigation, Smart Shift:Smart Shift} Key/Button Actions : {Left Button:Left Click, Right Button:Right Click, Middle Button:Mouse Middle Button, Back Button:Mouse Back Button, Forward Button:Mouse Forward Button, Mouse Gesture Button:Gesture Button Navigation, Smart Shift:Smart Shift} -solaar: error: Traceback (most recent call last): - File "/usr/lib/python3.10/site-packages/solaar/cli/__init__.py", line 210, in run - m.run(c, args, _find_receiver, _find_device) - File "/usr/lib/python3.10/site-packages/solaar/cli/show.py", line 296, in run - _print_device(dev) - File "/usr/lib/python3.10/site-packages/solaar/cli/show.py", line 232, in _print_device - v = setting.val_to_string(setting._device.persister.get(setting.name)) - File "/usr/lib/python3.10/site-packages/logitech_receiver/settings.py", line 238, in val_to_string - return self._validator.to_string(value) - File "/usr/lib/python3.10/site-packages/logitech_receiver/settings.py", line 1086, in to_string - return '{' + ', '.join([element_to_string(k, value[k]) for k in sorted(value)]) + '}' -TypeError: '<' not supported between instances of 'str' and 'int' + Key/Button Diversion (saved): {Middle Button:Regular, Back Button:Regular, Forward Button:Regular, Mouse Gesture Button:Regular, Smart Shift:Regular} + Key/Button Diversion : {Middle Button:Regular, Back Button:Regular, Forward Button:Regular, Mouse Gesture Button:Regular, Smart Shift:Regular} + 10: CHANGE HOST {1814} V1 + Change Host : 1:bork + 11: XY STATS {2250} V1 + 12: ADJUSTABLE DPI {2201} V2 + Sensitivity (DPI) (saved): 1000 + Sensitivity (DPI) : 1000 + 13: SMART SHIFT {2110} V0 + Scroll Wheel Ratcheted (saved): Freespinning + Scroll Wheel Ratcheted : Freespinning + Scroll Wheel Ratchet Speed (saved): 1 + Scroll Wheel Ratchet Speed : 1 + 14: HIRES WHEEL {2121} V1 + Multiplier: 15 + Has invert: Normal wheel motion + Has ratchet switch: Free wheel mode + Low resolution mode + HID notification + Scroll Wheel Direction (saved): False + Scroll Wheel Direction : False + Scroll Wheel 
Resolution (saved): False + Scroll Wheel Resolution : False + Scroll Wheel Diversion (saved): False + Scroll Wheel Diversion : False + 15: THUMB WHEEL {2150} V0 + Thumb Wheel Direction (saved): False + Thumb Wheel Direction : False + Thumb Wheel Diversion (saved): False + Thumb Wheel Diversion : False + 16: WHEEL STATS {2251} V0 + 17: DFUCONTROL {00C3} V0 + 18: DEVICE RESET {1802} V0 internal, hidden, unknown:000010 + 19: unknown:1803 {1803} V0 internal, hidden, unknown:000010 + 20: CONFIG DEVICE PROPS {1806} V8 internal, hidden, unknown:000010 + 21: unknown:1816 {1816} V0 internal, hidden, unknown:000010 + 22: OOBSTATE {1805} V0 internal, hidden + 23: unknown:1830 {1830} V0 internal, hidden, unknown:000010 + 24: unknown:1891 {1891} V6 internal, hidden, unknown:000008 + 25: unknown:18A1 {18A1} V0 internal, hidden, unknown:000010 + 26: unknown:1E00 {1E00} V0 hidden + 27: unknown:1E02 {1E02} V0 internal, hidden + 28: unknown:1602 {1602} V0 + 29: unknown:1EB0 {1EB0} V0 internal, hidden, unknown:000010 + 30: unknown:1861 {1861} V0 internal, hidden, unknown:000010 + 31: unknown:9300 {9300} V0 internal, hidden, unknown:000010 + 32: unknown:9001 {9001} V0 internal, hidden, unknown:000010 + 33: unknown:1E22 {1E22} V0 internal, hidden, unknown:000010 + 34: unknown:9205 {9205} V0 internal, hidden, unknown:000010 + Has 8 reprogrammable keys: + 0: Left Button , default: Left Click => Left Click + mse, analytics key events, pos:0, group:1, group mask:g1 + reporting: default + 1: Right Button , default: Right Click => Right Click + mse, analytics key events, pos:0, group:1, group mask:g1 + reporting: default + 2: Middle Button , default: Mouse Middle Button => Mouse Middle Button + mse, reprogrammable, divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2 + reporting: default + 3: Back Button , default: Mouse Back Button => Mouse Back Button + mse, reprogrammable, divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2 + reporting: default + 4: Forward Button , default: Mouse Forward Button => Mouse Forward Button + mse, reprogrammable, divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2 + reporting: default + 5: Mouse Gesture Button , default: Gesture Button Navigation => Gesture Button Navigation + mse, reprogrammable, divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2 + reporting: default + 6: Smart Shift , default: Smart Shift => Smart Shift + mse, reprogrammable, divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2 + reporting: default + 7: Virtual Gesture Button , default: Virtual Gesture Button => Virtual Gesture Button + divertable, virtual, raw XY, force raw XY, pos:0, group:3, group mask:empty + reporting: default + Battery: 95%, discharging. diff --git a/lib/solaar/gtk.py b/lib/solaar/gtk.py index 6676ea8609..86875bf70c 100755 --- a/lib/solaar/gtk.py +++ b/lib/solaar/gtk.py @@ -133,7 +133,7 @@ def _handlesig(signl, stack): faulthandler.dump_traceback() sys.exit('%s: exit due to keyboard interrupt' % (NAME.lower())) else: - sys.exit('') + sys.exit(0) def main():
acl-org__acl-anthology-3022
Paper Metadata: 2023.findings-emnlp.1054 ### Confirm that this is a metadata correction - [X] I want to file corrections to make the metadata match the PDF file hosted on the ACL Anthology. ### Anthology ID 2023.findings-emnlp.1054 ### Type of Paper Metadata Correction - [X] Paper Title - [ ] Paper Abstract - [ ] Author Name(s) ### Correction to Paper Title Please change the paper title that appears in Cite (ACL) and Cite (Informal) to "Measuring Pointwise 𝒱-Usable Information In-Context-ly" ### Correction to Paper Abstract _No response_ ### Correction to Author Name(s) _No response_
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom lxml import etree\nimport csv\nimport logging as log\nimport os\nfrom TexSoup import TexSoup\nfrom TexSoup.data import TexCmd, TexText, TexGroup\n\nFUNCTION_NAMES = (\"lim\", \"log\")\nTEX_TO_HTML = {\n \"mathrm\": (\"span\", {\"class\": \"font-weight-normal\"}),\n \"textrm\": (\"span\", {\"class\": \"font-weight-normal\"}),\n \"text\": (\"span\", {\"class\": \"font-weight-normal\"}),\n \"mathbf\": (\"strong\", {}),\n \"textbf\": (\"strong\", {}),\n \"boldsymbol\": (\"strong\", {}),\n \"mathit\": (\"em\", {}),\n \"textit\": (\"em\", {}),\n \"emph\": (\"em\", {}),\n \"textsc\": (\"span\", {\"style\": \"font-variant: small-caps;\"}),\n \"texttt\": (\"span\", {\"class\": \"text-monospace\"}),\n \"textsubscript\": (\"sub\", {}),\n \"textsuperscript\": (\"sup\", {}),\n}\nREMOVED_COMMANDS = (\"bf\", \"rm\", \"it\", \"sc\")\n\n\ndef _append_text(text, trg):\n if not text:\n return\n if len(trg):\n if trg[-1].tail is not None:\n trg[-1].tail += text\n else:\n trg[-1].tail = text\n else:\n if trg.text is not None:\n trg.text += text\n else:\n trg.text = text\n\n\nclass TexMath:\n \"\"\"Interpreter and converter for TeX inline math expressions.\n\n This class uses TexSoup (https://github.com/alvinwan/TexSoup) to parse a TeX\n expression and converts it to valid HTML. The conversion combines a small\n number of handwritten rules with a mapping of LaTeX math mode commands to\n Unicode symbols (http://milde.users.sourceforge.net/LUCR/Math/). 
Parts that\n cannot be interpreted using this simple method are preserved as raw LaTeX.\n \"\"\"\n\n def __init__(self, symbolsfile=None):\n self.cmd_map = {}\n if symbolsfile is None:\n symbolsfile = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"unimathsymbols.txt\"\n )\n self.load_symbols(symbolsfile)\n\n def load_symbols(self, filename):\n with open(filename, \"r\", encoding=\"utf8\") as f:\n reader = csv.reader(f, delimiter=\"^\")\n for row in reader:\n if row[0].startswith(\"#\"): # comment\n continue\n assert len(row) == 8, \"Expect eight-column format\"\n char, cmd = row[1], row[2]\n if cmd.startswith(\"\\\\\"):\n self.cmd_map[cmd[1:]] = char\n if row[-1].startswith(\"= \") and \", \" in row[-1]:\n # last column sometimes contains alternative command\n cmd = row[-1][2:].split(\", \")[0]\n if cmd.startswith(\"\\\\\"):\n self.cmd_map[cmd[1:]] = char\n\n def _parse(self, everything, trg):\n \"\"\"Parses a list of TeX constituents into an lxml.etree._Element.\n\n Arguments:\n everything: An iterator over TeX constituents as provided by TexSoup\n trg: The lxml.etree._Element to parse the expression into\n\n The approach of iterating over the TeX constituents roughly follows\n <https://github.com/alvinwan/TexSoup/blob/master/examples/list_everything.py>.\n \"\"\"\n sxscript = False # Tracks whether we're in a subscript/superscript\n for code in everything:\n if isinstance(code, TexCmd):\n # code is a TeX command\n self._parse_command(code, trg)\n elif isinstance(code, TexText) or isinstance(code, str):\n # code is text\n sxscript = self._parse_text(code, trg)\n elif isinstance(code, TexGroup):\n # If in subscript/superscript, wrap the entire element in respective tag\n if sxscript:\n my_trg = etree.Element(sxscript)\n self._parse(code.contents, my_trg)\n trg.append(my_trg)\n sxscript = False\n # Otherwise, just parse it normally\n else:\n self._parse(code.contents, trg)\n else:\n log.error(f\"TeX-math parser got unhandled element: {type(code)}\")\n\n def _parse_command(self, code, trg):\n args = list(code.args)\n name = str(code.name)\n # Check if the command is in the list of known Unicode mappings\n if name in self.cmd_map:\n _append_text(self.cmd_map[name], trg)\n self._parse(args, trg)\n # Check if command + arguments is in the list of known Unicode mappings\n # (this covers commands like \"\\mathcal{A}\", which have their own entries)\n elif str(code)[1:] in self.cmd_map:\n _append_text(self.cmd_map[str(code)[1:]], trg)\n # Check if command is a known function name (e.g. 
\"log\")\n elif name in FUNCTION_NAMES:\n sx = etree.Element(\"span\")\n sx.attrib[\"class\"] = \"tex-math-function\"\n sx.text = str(name)\n trg.append(sx)\n self._parse(args, trg)\n # Handle fractions\n elif name == \"frac\":\n self._parse_fraction(args, trg)\n # Handle commands with simple HTML tag substitutions\n elif name in TEX_TO_HTML:\n elem_name, elem_attrib = TEX_TO_HTML[name]\n sx = etree.Element(elem_name, attrib=elem_attrib)\n self._parse(args, sx)\n trg.append(sx)\n # Known, but unsupported formatting tags that will just be removed\n elif name in REMOVED_COMMANDS and not args:\n pass\n # Give up, but preserve element\n else:\n log.warn(f\"Unknown TeX-math command: {code}\")\n self._append_unparsed(code, trg)\n\n def _parse_fraction(self, args, trg):\n if len(args) != 2:\n log.warn(f\"Couldn't parse \\\\frac: got {len(args)} arguments, expected 2\")\n self._append_unparsed({'name': 'frac', 'args': args}, trg)\n else:\n # Represent numerator of fraction as superscript\n sx = etree.Element(\"sup\")\n self._parse([args[0]], sx)\n trg.append(sx)\n # Unicode symbol for fraction slash\n _append_text(\"\\u2044\", trg)\n # Represent denominator of fraction as subscript\n sx = etree.Element(\"sub\")\n self._parse([args[1]], sx)\n trg.append(sx)\n\n def _parse_text(self, code, trg):\n text = str(code)\n # TexSoup doesn't parse any non-alpha command as a command. Ex: \\$\n # However it does seperate them into their own text part. Ex: 'r\\\\&dd' -> ['r', '\\\\&', 'dd']\n # Therefore try to do command mapping replacement of all text beginning with \\ and of length 2\n if len(text) == 2 and text[0] == '\\\\':\n text = self.cmd_map.get(text[1], text)\n _append_text(text, trg)\n return\n # parse ^ and _ (won't get recognized as separate nodes by TexSoup)\n sxscript = False\n if \"^\" in text or \"_\" in text:\n buf = \"\"\n for char in text:\n if char == \"^\" or char == \"_\":\n _append_text(buf, trg)\n buf = \"\"\n sxscript = \"sup\" if char == \"^\" else \"sub\"\n elif sxscript:\n sx = etree.Element(sxscript)\n sx.text = char\n trg.append(sx)\n sxscript = False\n else:\n buf += char\n text = buf\n # Append as text\n _append_text(text, trg)\n return sxscript\n\n def _append_unparsed(self, code, trg):\n pre = etree.Element(\"span\")\n pre.attrib[\"class\"] = \"tex-math-unparsed\"\n pre.text = f\"\\\\{code.name}{code.args}\"\n trg.append(pre)\n\n def etree_to_html(self, element):\n result = etree.Element(\"span\")\n result.attrib[\"class\"] = \"tex-math\"\n result.tail = element.tail # Preserve tail\n self._parse(TexSoup(element.text).expr.all, result)\n return result\n\n def to_html(self, element):\n \"\"\"Converts a TeX math expression to HTML markup.\"\"\"\n if isinstance(element, etree._Element):\n return self.etree_to_html(element)\n elif isinstance(element, str):\n value = self.etree_to_html(etree.fromstring(f\"<span>{element}</span>\"))\n return etree.tostring(value)\n raise NotImplementedError(f\"Cannot convert elements of type {type(element)}\")\n\n def to_unicode(self, element):\n \"\"\"Converts a TeX math expression to a Unicode string.\n\n This will perform the same conversions as `to_html()`, but strip out the\n HTML tags afterwards.\n \"\"\"\n element = self.to_html(element)\n return etree.tostring(element, encoding=\"unicode\", method=\"text\")\n", "path": "bin/anthology/texmath.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom lxml import etree\nimport csv\nimport logging as log\nimport os\nfrom TexSoup import TexSoup\nfrom TexSoup.data import TexCmd, TexText, TexGroup\n\nFUNCTION_NAMES = (\"lim\", \"log\")\nTEX_TO_HTML = {\n \"mathrm\": (\"span\", {\"class\": \"font-weight-normal\"}),\n \"textrm\": (\"span\", {\"class\": \"font-weight-normal\"}),\n \"text\": (\"span\", {\"class\": \"font-weight-normal\"}),\n \"mathbf\": (\"strong\", {}),\n \"textbf\": (\"strong\", {}),\n \"boldsymbol\": (\"strong\", {}),\n \"mathit\": (\"em\", {}),\n \"textit\": (\"em\", {}),\n \"emph\": (\"em\", {}),\n \"textsc\": (\"span\", {\"style\": \"font-variant: small-caps;\"}),\n \"texttt\": (\"span\", {\"class\": \"text-monospace\"}),\n \"textsubscript\": (\"sub\", {}),\n \"textsuperscript\": (\"sup\", {}),\n}\nREMOVED_COMMANDS = (\"bf\", \"rm\", \"it\", \"sc\")\n\n\ndef _append_text(text, trg):\n if not text:\n return\n if len(trg):\n if trg[-1].tail is not None:\n trg[-1].tail += text\n else:\n trg[-1].tail = text\n else:\n if trg.text is not None:\n trg.text += text\n else:\n trg.text = text\n\n\nclass TexMath:\n \"\"\"Interpreter and converter for TeX inline math expressions.\n\n This class uses TexSoup (https://github.com/alvinwan/TexSoup) to parse a TeX\n expression and converts it to valid HTML. The conversion combines a small\n number of handwritten rules with a mapping of LaTeX math mode commands to\n Unicode symbols (http://milde.users.sourceforge.net/LUCR/Math/). 
Parts that\n cannot be interpreted using this simple method are preserved as raw LaTeX.\n \"\"\"\n\n def __init__(self, symbolsfile=None):\n self.cmd_map = {}\n if symbolsfile is None:\n symbolsfile = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"unimathsymbols.txt\"\n )\n self.load_symbols(symbolsfile)\n\n def load_symbols(self, filename):\n with open(filename, \"r\", encoding=\"utf8\") as f:\n reader = csv.reader(f, delimiter=\"^\")\n for row in reader:\n if row[0].startswith(\"#\"): # comment\n continue\n assert len(row) == 8, \"Expect eight-column format\"\n char, cmd = row[1], row[2]\n if cmd.startswith(\"\\\\\"):\n self.cmd_map[cmd[1:]] = char\n if row[-1].startswith(\"= \") and \", \" in row[-1]:\n # last column sometimes contains alternative command\n cmd = row[-1][2:].split(\", \")[0]\n if cmd.startswith(\"\\\\\"):\n self.cmd_map[cmd[1:]] = char\n\n def _parse(self, everything, trg):\n \"\"\"Parses a list of TeX constituents into an lxml.etree._Element.\n\n Arguments:\n everything: An iterator over TeX constituents as provided by TexSoup\n trg: The lxml.etree._Element to parse the expression into\n\n The approach of iterating over the TeX constituents roughly follows\n <https://github.com/alvinwan/TexSoup/blob/master/examples/list_everything.py>.\n \"\"\"\n sxscript = False # Tracks whether we're in a subscript/superscript\n for code in everything:\n if isinstance(code, TexCmd):\n # code is a TeX command\n self._parse_command(code, trg)\n elif isinstance(code, TexText) or isinstance(code, str):\n # code is text\n sxscript = self._parse_text(code, trg)\n elif isinstance(code, TexGroup):\n # If in subscript/superscript, wrap the entire element in respective tag\n if sxscript:\n my_trg = etree.Element(sxscript)\n self._parse(code.contents, my_trg)\n trg.append(my_trg)\n sxscript = False\n # Otherwise, just parse it normally\n else:\n self._parse(code.contents, trg)\n else:\n log.error(f\"TeX-math parser got unhandled element: {type(code)}\")\n\n def _parse_command(self, code, trg):\n args = list(code.args)\n name = str(code.name)\n # Check if the command is in the list of known Unicode mappings\n if name in self.cmd_map:\n _append_text(self.cmd_map[name], trg)\n self._parse(args, trg)\n # Check if command + arguments is in the list of known Unicode mappings\n # (this covers commands like \"\\mathcal{A}\", which have their own entries)\n elif str(code)[1:] in self.cmd_map:\n _append_text(self.cmd_map[str(code)[1:]], trg)\n # Check if command is a known function name (e.g. 
\"log\")\n elif name in FUNCTION_NAMES:\n sx = etree.Element(\"span\")\n sx.attrib[\"class\"] = \"tex-math-function\"\n sx.text = str(name)\n trg.append(sx)\n self._parse(args, trg)\n # Handle fractions\n elif name == \"frac\":\n self._parse_fraction(args, trg)\n # Handle commands with simple HTML tag substitutions\n elif name in TEX_TO_HTML:\n elem_name, elem_attrib = TEX_TO_HTML[name]\n sx = etree.Element(elem_name, attrib=elem_attrib)\n self._parse(args, sx)\n trg.append(sx)\n # Known, but unsupported formatting tags that will just be removed\n elif name in REMOVED_COMMANDS and not args:\n pass\n # Give up, but preserve element\n else:\n log.warn(f\"Unknown TeX-math command: {code}\")\n self._append_unparsed(code, trg)\n\n def _parse_fraction(self, args, trg):\n if len(args) != 2:\n log.warn(f\"Couldn't parse \\\\frac: got {len(args)} arguments, expected 2\")\n self._append_unparsed({'name': 'frac', 'args': args}, trg)\n else:\n # Represent numerator of fraction as superscript\n sx = etree.Element(\"sup\")\n self._parse([args[0]], sx)\n trg.append(sx)\n # Unicode symbol for fraction slash\n _append_text(\"\\u2044\", trg)\n # Represent denominator of fraction as subscript\n sx = etree.Element(\"sub\")\n self._parse([args[1]], sx)\n trg.append(sx)\n\n def _parse_text(self, code, trg):\n text = str(code)\n # TexSoup doesn't parse any non-alpha command as a command. Ex: \\$\n # However it does seperate them into their own text part. Ex: 'r\\\\&dd' -> ['r', '\\\\&', 'dd']\n # Therefore try to do command mapping replacement of all text beginning with \\ and of length 2\n if len(text) == 2 and text[0] == '\\\\':\n text = self.cmd_map.get(text[1], text)\n _append_text(text, trg)\n return\n # parse ^ and _ (won't get recognized as separate nodes by TexSoup)\n sxscript = False\n if \"^\" in text or \"_\" in text:\n buf = \"\"\n for char in text:\n if char == \"^\" or char == \"_\":\n _append_text(buf, trg)\n buf = \"\"\n sxscript = \"sup\" if char == \"^\" else \"sub\"\n elif sxscript:\n sx = etree.Element(sxscript)\n sx.text = char\n trg.append(sx)\n sxscript = False\n else:\n buf += char\n text = buf\n # Append as text\n _append_text(text, trg)\n return sxscript\n\n def _append_unparsed(self, code, trg):\n pre = etree.Element(\"span\")\n pre.attrib[\"class\"] = \"tex-math-unparsed\"\n pre.text = f\"\\\\{code.name}{code.args}\"\n trg.append(pre)\n\n def etree_to_html(self, element):\n result = etree.Element(\"span\")\n result.attrib[\"class\"] = \"tex-math\"\n result.tail = element.tail # Preserve tail\n self._parse(TexSoup(element.text).expr.all, result)\n return result\n\n def to_html(self, element):\n \"\"\"Converts a TeX math expression to HTML markup.\"\"\"\n if isinstance(element, etree._Element):\n return self.etree_to_html(element)\n elif isinstance(element, str):\n value = self.etree_to_html(etree.fromstring(f\"<span>{element}</span>\"))\n return etree.tostring(value)\n raise NotImplementedError(f\"Cannot convert elements of type {type(element)}\")\n\n def to_unicode(self, element):\n \"\"\"Converts a TeX math expression to a Unicode string.\n\n This will perform the same conversions as `to_html()`, but strip out the\n HTML tags afterwards.\n \"\"\"\n element = self.to_html(element)\n return etree.tostring(element, encoding=\"unicode\", method=\"text\", with_tail=False)\n", "path": "bin/anthology/texmath.py" } ]
diff --git a/bin/anthology/texmath.py b/bin/anthology/texmath.py index dedb347c9c..42e4f21509 100644 --- a/bin/anthology/texmath.py +++ b/bin/anthology/texmath.py @@ -230,4 +230,4 @@ def to_unicode(self, element): HTML tags afterwards. """ element = self.to_html(element) - return etree.tostring(element, encoding="unicode", method="text") + return etree.tostring(element, encoding="unicode", method="text", with_tail=False) diff --git a/tests/test_tex_math.py b/tests/test_tex_math.py index a8e5f48af5..2bedf9bc1b 100644 --- a/tests/test_tex_math.py +++ b/tests/test_tex_math.py @@ -263,6 +263,8 @@ def test_unicode(inp, out): element = etree.fromstring(f"<span>{inp}</span>") math_element = element.find(".//tex-math") actual_out = texmath.to_unicode(math_element) + if math_element.tail: + actual_out += math_element.tail assert actual_out == out
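For context on the `with_tail=False` change above: lxml's `etree.tostring(..., method="text")` includes an element's tail text by default, so serializing an inline `<tex-math>` element that sits in the middle of a title would also emit the plain text that follows it. A small self-contained sketch (the title markup here is an invented example, not taken from the Anthology XML):

```python
from lxml import etree

title = etree.fromstring(
    "<title>Measuring Pointwise <tex-math>\\mathcal{V}</tex-math>-Usable "
    "Information In-Context-ly</title>"
)
math = title.find("tex-math")

# Default behaviour: the element's own text plus its tail.
print(etree.tostring(math, encoding="unicode", method="text"))
# -> \mathcal{V}-Usable Information In-Context-ly

# With with_tail=False: only the element's own text; the caller handles the tail.
print(etree.tostring(math, encoding="unicode", method="text", with_tail=False))
# -> \mathcal{V}
```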
docker__docker-py-1647
DockerClient.secrets is not a property `DockerClient.secrets` is not decorated with `property` and so must be called rather than just accessed. This is inconsistent with the docs and similar collections on the client attribute (e.g. images, containers, etc.).
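A toy illustration (plain Python classes, not the Docker SDK itself) of what the missing decorator changes: without `@property`, attribute access yields the bound method rather than the collection object, which is why `client.secrets` had to be called:

```python
class Collection:
    """Stand-in for the SDK's SecretCollection."""

class WithoutProperty:
    def secrets(self):              # as reported: must be called
        return Collection()

class WithProperty:
    @property
    def secrets(self):              # as fixed: plain attribute access
        return Collection()

print(type(WithoutProperty().secrets).__name__)  # -> method (the surprise)
print(type(WithProperty().secrets).__name__)     # -> Collection
```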
[ { "content": "from .api.client import APIClient\nfrom .models.containers import ContainerCollection\nfrom .models.images import ImageCollection\nfrom .models.networks import NetworkCollection\nfrom .models.nodes import NodeCollection\nfrom .models.plugins import PluginCollection\nfrom .models.secrets import SecretCollection\nfrom .models.services import ServiceCollection\nfrom .models.swarm import Swarm\nfrom .models.volumes import VolumeCollection\nfrom .utils import kwargs_from_env\n\n\nclass DockerClient(object):\n \"\"\"\n A client for communicating with a Docker server.\n\n Example:\n\n >>> import docker\n >>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')\n\n Args:\n base_url (str): URL to the Docker server. For example,\n ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.\n version (str): The version of the API to use. Set to ``auto`` to\n automatically detect the server's version. Default: ``1.26``\n timeout (int): Default timeout for API calls, in seconds.\n tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass\n ``True`` to enable it with default options, or pass a\n :py:class:`~docker.tls.TLSConfig` object to use custom\n configuration.\n user_agent (str): Set a custom user agent for requests to the server.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.api = APIClient(*args, **kwargs)\n\n @classmethod\n def from_env(cls, **kwargs):\n \"\"\"\n Return a client configured from environment variables.\n\n The environment variables used are the same as those used by the\n Docker command-line client. They are:\n\n .. envvar:: DOCKER_HOST\n\n The URL to the Docker host.\n\n .. envvar:: DOCKER_TLS_VERIFY\n\n Verify the host against a CA certificate.\n\n .. envvar:: DOCKER_CERT_PATH\n\n A path to a directory containing TLS certificates to use when\n connecting to the Docker host.\n\n Args:\n version (str): The version of the API to use. Set to ``auto`` to\n automatically detect the server's version. Default: ``1.26``\n timeout (int): Default timeout for API calls, in seconds.\n ssl_version (int): A valid `SSL version`_.\n assert_hostname (bool): Verify the hostname of the server.\n environment (dict): The environment to read environment variables\n from. Default: the value of ``os.environ``\n\n Example:\n\n >>> import docker\n >>> client = docker.from_env()\n\n .. _`SSL version`:\n https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1\n \"\"\"\n timeout = kwargs.pop('timeout', None)\n version = kwargs.pop('version', None)\n return cls(timeout=timeout, version=version,\n **kwargs_from_env(**kwargs))\n\n # Resources\n @property\n def containers(self):\n \"\"\"\n An object for managing containers on the server. See the\n :doc:`containers documentation <containers>` for full details.\n \"\"\"\n return ContainerCollection(client=self)\n\n @property\n def images(self):\n \"\"\"\n An object for managing images on the server. See the\n :doc:`images documentation <images>` for full details.\n \"\"\"\n return ImageCollection(client=self)\n\n @property\n def networks(self):\n \"\"\"\n An object for managing networks on the server. See the\n :doc:`networks documentation <networks>` for full details.\n \"\"\"\n return NetworkCollection(client=self)\n\n @property\n def nodes(self):\n \"\"\"\n An object for managing nodes on the server. See the\n :doc:`nodes documentation <nodes>` for full details.\n \"\"\"\n return NodeCollection(client=self)\n\n @property\n def plugins(self):\n \"\"\"\n An object for managing plugins on the server. 
See the\n :doc:`plugins documentation <plugins>` for full details.\n \"\"\"\n return PluginCollection(client=self)\n\n def secrets(self):\n \"\"\"\n An object for managing secrets on the server. See the\n :doc:`secrets documentation <secrets>` for full details.\n \"\"\"\n return SecretCollection(client=self)\n\n @property\n def services(self):\n \"\"\"\n An object for managing services on the server. See the\n :doc:`services documentation <services>` for full details.\n \"\"\"\n return ServiceCollection(client=self)\n\n @property\n def swarm(self):\n \"\"\"\n An object for managing a swarm on the server. See the\n :doc:`swarm documentation <swarm>` for full details.\n \"\"\"\n return Swarm(client=self)\n\n @property\n def volumes(self):\n \"\"\"\n An object for managing volumes on the server. See the\n :doc:`volumes documentation <volumes>` for full details.\n \"\"\"\n return VolumeCollection(client=self)\n\n # Top-level methods\n def events(self, *args, **kwargs):\n return self.api.events(*args, **kwargs)\n events.__doc__ = APIClient.events.__doc__\n\n def df(self):\n return self.api.df()\n df.__doc__ = APIClient.df.__doc__\n\n def info(self, *args, **kwargs):\n return self.api.info(*args, **kwargs)\n info.__doc__ = APIClient.info.__doc__\n\n def login(self, *args, **kwargs):\n return self.api.login(*args, **kwargs)\n login.__doc__ = APIClient.login.__doc__\n\n def ping(self, *args, **kwargs):\n return self.api.ping(*args, **kwargs)\n ping.__doc__ = APIClient.ping.__doc__\n\n def version(self, *args, **kwargs):\n return self.api.version(*args, **kwargs)\n version.__doc__ = APIClient.version.__doc__\n\n def __getattr__(self, name):\n s = [\"'DockerClient' object has no attribute '{}'\".format(name)]\n # If a user calls a method on APIClient, they\n if hasattr(APIClient, name):\n s.append(\"In Docker SDK for Python 2.0, this method is now on the \"\n \"object APIClient. See the low-level API section of the \"\n \"documentation for more details.\")\n raise AttributeError(' '.join(s))\n\n\nfrom_env = DockerClient.from_env\n", "path": "docker/client.py" } ]
[ { "content": "from .api.client import APIClient\nfrom .models.containers import ContainerCollection\nfrom .models.images import ImageCollection\nfrom .models.networks import NetworkCollection\nfrom .models.nodes import NodeCollection\nfrom .models.plugins import PluginCollection\nfrom .models.secrets import SecretCollection\nfrom .models.services import ServiceCollection\nfrom .models.swarm import Swarm\nfrom .models.volumes import VolumeCollection\nfrom .utils import kwargs_from_env\n\n\nclass DockerClient(object):\n \"\"\"\n A client for communicating with a Docker server.\n\n Example:\n\n >>> import docker\n >>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')\n\n Args:\n base_url (str): URL to the Docker server. For example,\n ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.\n version (str): The version of the API to use. Set to ``auto`` to\n automatically detect the server's version. Default: ``1.26``\n timeout (int): Default timeout for API calls, in seconds.\n tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass\n ``True`` to enable it with default options, or pass a\n :py:class:`~docker.tls.TLSConfig` object to use custom\n configuration.\n user_agent (str): Set a custom user agent for requests to the server.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.api = APIClient(*args, **kwargs)\n\n @classmethod\n def from_env(cls, **kwargs):\n \"\"\"\n Return a client configured from environment variables.\n\n The environment variables used are the same as those used by the\n Docker command-line client. They are:\n\n .. envvar:: DOCKER_HOST\n\n The URL to the Docker host.\n\n .. envvar:: DOCKER_TLS_VERIFY\n\n Verify the host against a CA certificate.\n\n .. envvar:: DOCKER_CERT_PATH\n\n A path to a directory containing TLS certificates to use when\n connecting to the Docker host.\n\n Args:\n version (str): The version of the API to use. Set to ``auto`` to\n automatically detect the server's version. Default: ``1.26``\n timeout (int): Default timeout for API calls, in seconds.\n ssl_version (int): A valid `SSL version`_.\n assert_hostname (bool): Verify the hostname of the server.\n environment (dict): The environment to read environment variables\n from. Default: the value of ``os.environ``\n\n Example:\n\n >>> import docker\n >>> client = docker.from_env()\n\n .. _`SSL version`:\n https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1\n \"\"\"\n timeout = kwargs.pop('timeout', None)\n version = kwargs.pop('version', None)\n return cls(timeout=timeout, version=version,\n **kwargs_from_env(**kwargs))\n\n # Resources\n @property\n def containers(self):\n \"\"\"\n An object for managing containers on the server. See the\n :doc:`containers documentation <containers>` for full details.\n \"\"\"\n return ContainerCollection(client=self)\n\n @property\n def images(self):\n \"\"\"\n An object for managing images on the server. See the\n :doc:`images documentation <images>` for full details.\n \"\"\"\n return ImageCollection(client=self)\n\n @property\n def networks(self):\n \"\"\"\n An object for managing networks on the server. See the\n :doc:`networks documentation <networks>` for full details.\n \"\"\"\n return NetworkCollection(client=self)\n\n @property\n def nodes(self):\n \"\"\"\n An object for managing nodes on the server. See the\n :doc:`nodes documentation <nodes>` for full details.\n \"\"\"\n return NodeCollection(client=self)\n\n @property\n def plugins(self):\n \"\"\"\n An object for managing plugins on the server. 
See the\n :doc:`plugins documentation <plugins>` for full details.\n \"\"\"\n return PluginCollection(client=self)\n\n @property\n def secrets(self):\n \"\"\"\n An object for managing secrets on the server. See the\n :doc:`secrets documentation <secrets>` for full details.\n \"\"\"\n return SecretCollection(client=self)\n\n @property\n def services(self):\n \"\"\"\n An object for managing services on the server. See the\n :doc:`services documentation <services>` for full details.\n \"\"\"\n return ServiceCollection(client=self)\n\n @property\n def swarm(self):\n \"\"\"\n An object for managing a swarm on the server. See the\n :doc:`swarm documentation <swarm>` for full details.\n \"\"\"\n return Swarm(client=self)\n\n @property\n def volumes(self):\n \"\"\"\n An object for managing volumes on the server. See the\n :doc:`volumes documentation <volumes>` for full details.\n \"\"\"\n return VolumeCollection(client=self)\n\n # Top-level methods\n def events(self, *args, **kwargs):\n return self.api.events(*args, **kwargs)\n events.__doc__ = APIClient.events.__doc__\n\n def df(self):\n return self.api.df()\n df.__doc__ = APIClient.df.__doc__\n\n def info(self, *args, **kwargs):\n return self.api.info(*args, **kwargs)\n info.__doc__ = APIClient.info.__doc__\n\n def login(self, *args, **kwargs):\n return self.api.login(*args, **kwargs)\n login.__doc__ = APIClient.login.__doc__\n\n def ping(self, *args, **kwargs):\n return self.api.ping(*args, **kwargs)\n ping.__doc__ = APIClient.ping.__doc__\n\n def version(self, *args, **kwargs):\n return self.api.version(*args, **kwargs)\n version.__doc__ = APIClient.version.__doc__\n\n def __getattr__(self, name):\n s = [\"'DockerClient' object has no attribute '{}'\".format(name)]\n # If a user calls a method on APIClient, they\n if hasattr(APIClient, name):\n s.append(\"In Docker SDK for Python 2.0, this method is now on the \"\n \"object APIClient. See the low-level API section of the \"\n \"documentation for more details.\")\n raise AttributeError(' '.join(s))\n\n\nfrom_env = DockerClient.from_env\n", "path": "docker/client.py" } ]
diff --git a/docker/client.py b/docker/client.py index 09abd6332..66ef60f3f 100644 --- a/docker/client.py +++ b/docker/client.py @@ -119,6 +119,7 @@ def plugins(self): """ return PluginCollection(client=self) + @property def secrets(self): """ An object for managing secrets on the server. See the
e-valuation__EvaP-821
Revoke course approval It must be possible to revoke the approval of a course and to move it back to state `new`.
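The `Course` model below already declares its workflow with django_fsm's `@transition` decorator, including a `revert_to_new` transition that only accepts the `prepared` state. One plausible way to support revoking approval — shown only as a hedged fragment of that model, not as the actual change merged in the PR (which is not reproduced here) — is to let the same transition also accept `approved`:

```python
# Fragment of the Course model, not a standalone script; it reuses the
# `state` FSMField and the `transition` import already present in models.py.
@transition(field=state, source=['prepared', 'approved'], target='new')
def revert_to_new(self):
    pass
```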
[ { "content": "import datetime\nimport random\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin, Group\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.db import models, transaction\nfrom django.db.models import Count, Q\nfrom django.dispatch import Signal, receiver\nfrom django.template.base import TemplateSyntaxError, TemplateEncodingError\nfrom django.template import Context, Template\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.functional import cached_property\n\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\n\n# see evaluation.meta for the use of Translate in this file\nfrom evap.evaluation.meta import LocalizeModelBase, Translate\n\n\nlogger = logging.getLogger(__name__)\n\n\n# for converting state into student_state\nSTUDENT_STATES_NAMES = {\n 'new': 'upcoming',\n 'prepared': 'upcoming',\n 'editor_approved': 'upcoming',\n 'approved': 'upcoming',\n 'in_evaluation': 'in_evaluation',\n 'evaluated': 'evaluationFinished',\n 'reviewed': 'evaluationFinished',\n 'published': 'published'\n}\n\n\nclass NotArchiveable(Exception):\n \"\"\"An attempt has been made to archive something that is not archiveable.\"\"\"\n pass\n\n\nclass Semester(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Represents a semester, e.g. the winter term of 2011/2012.\"\"\"\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n is_archived = models.BooleanField(default=False, verbose_name=_(\"is archived\"))\n\n created_at = models.DateField(verbose_name=_(\"created at\"), auto_now_add=True)\n\n class Meta:\n ordering = ('-created_at', 'name_de')\n verbose_name = _(\"semester\")\n verbose_name_plural = _(\"semesters\")\n\n def __str__(self):\n return self.name\n\n @property\n def can_staff_delete(self):\n return all(course.can_staff_delete for course in self.course_set.all())\n\n @property\n def is_archiveable(self):\n return not self.is_archived and all(course.is_archiveable for course in self.course_set.all())\n\n @transaction.atomic\n def archive(self):\n if not self.is_archiveable:\n raise NotArchiveable()\n for course in self.course_set.all():\n course._archive()\n self.is_archived = True\n self.save()\n\n @classmethod\n def get_all_with_published_courses(cls):\n return cls.objects.filter(course__state=\"published\").distinct()\n\n @classmethod\n def active_semester(cls):\n return cls.objects.order_by(\"created_at\").last()\n\n\nclass Questionnaire(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A named collection of questions.\"\"\"\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n description_de = models.TextField(verbose_name=_(\"description (german)\"), blank=True, null=True)\n description_en = models.TextField(verbose_name=_(\"description (english)\"), blank=True, null=True)\n description = Translate\n\n public_name_de = models.CharField(max_length=1024, verbose_name=_(\"display name (german)\"))\n public_name_en = models.CharField(max_length=1024, verbose_name=_(\"display name (english)\"))\n public_name = Translate\n\n teaser_de = 
models.TextField(verbose_name=_(\"teaser (german)\"), blank=True, null=True)\n teaser_en = models.TextField(verbose_name=_(\"teaser (english)\"), blank=True, null=True)\n teaser = Translate\n\n index = models.IntegerField(verbose_name=_(\"ordering index\"), default=0)\n\n is_for_contributors = models.BooleanField(verbose_name=_(\"is for contributors\"), default=False)\n staff_only = models.BooleanField(verbose_name=_(\"display for staff only\"), default=False)\n obsolete = models.BooleanField(verbose_name=_(\"obsolete\"), default=False)\n\n class Meta:\n ordering = ('is_for_contributors', 'index', 'name_de')\n verbose_name = _(\"questionnaire\")\n verbose_name_plural = _(\"questionnaires\")\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return (self.is_for_contributors, self.index) < (other.is_for_contributors, other.index)\n\n def __gt__(self, other):\n return (self.is_for_contributors, self.index) > (other.is_for_contributors, other.index)\n\n @property\n def can_staff_edit(self):\n return not self.contributions.exists()\n\n @property\n def can_staff_delete(self):\n return self.can_staff_edit\n\n @property\n def text_questions(self):\n return [question for question in self.question_set.all() if question.is_text_question]\n\n @property\n def rating_questions(self):\n return [question for question in self.question_set.all() if question.is_rating_question]\n\n SINGLE_RESULT_QUESTIONNAIRE_NAME = \"Single result\"\n\n @classmethod\n def get_single_result_questionnaire(cls):\n return cls.objects.get(name_en=cls.SINGLE_RESULT_QUESTIONNAIRE_NAME)\n\n\nclass Degree(models.Model, metaclass=LocalizeModelBase):\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n order = models.IntegerField(verbose_name=_(\"degree order\"), default=-1)\n\n class Meta:\n ordering = ['order', ]\n\n def __str__(self):\n return self.name\n\n def can_staff_delete(self):\n if self.pk is None:\n return True\n return not self.courses.all().exists()\n\n\nclass CourseType(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Model for the type of a course, e.g. a lecture\"\"\"\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n class Meta:\n ordering = ['name_de', ]\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return self.name_de < other.name_de\n\n def can_staff_delete(self):\n if not self.pk:\n return True\n return not self.courses.all().exists()\n\n\nclass Course(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Models a single course, e.g. the Math 101 course of 2002.\"\"\"\n\n state = FSMField(default='new', protected=True)\n\n semester = models.ForeignKey(Semester, models.PROTECT, verbose_name=_(\"semester\"))\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"))\n name = Translate\n\n # type of course: lecture, seminar, project\n type = models.ForeignKey(CourseType, models.PROTECT, verbose_name=_(\"course type\"), related_name=\"courses\")\n\n # e.g. 
Bachelor, Master\n degrees = models.ManyToManyField(Degree, verbose_name=_(\"degrees\"), related_name=\"courses\")\n\n # default is True as that's the more restrictive option\n is_graded = models.BooleanField(verbose_name=_(\"is graded\"), default=True)\n\n # defines whether results can only be seen by contributors and participants\n is_private = models.BooleanField(verbose_name=_(\"is private\"), default=False)\n\n # graders can set this to True, then the course will be handled as if final grades have already been uploaded\n gets_no_grade_documents = models.BooleanField(verbose_name=_(\"gets no grade documents\"), default=False)\n\n # whether participants must vote to qualify for reward points\n is_required_for_reward = models.BooleanField(verbose_name=_(\"is required for reward\"), default=True)\n\n # students that are allowed to vote\n participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"participants\"), blank=True, related_name='courses_participating_in')\n _participant_count = models.IntegerField(verbose_name=_(\"participant count\"), blank=True, null=True, default=None)\n\n # students that already voted\n voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"voters\"), blank=True, related_name='courses_voted_for')\n _voter_count = models.IntegerField(verbose_name=_(\"voter count\"), blank=True, null=True, default=None)\n\n # when the evaluation takes place\n vote_start_date = models.DateField(verbose_name=_(\"first day of evaluation\"))\n vote_end_date = models.DateField(verbose_name=_(\"last day of evaluation\"))\n\n # who last modified this course\n last_modified_time = models.DateTimeField(auto_now=True)\n last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, null=True, blank=True, related_name=\"course_last_modified_user+\")\n\n course_evaluated = Signal(providing_args=['request', 'semester'])\n\n class Meta:\n ordering = ('name_de',)\n unique_together = (\n ('semester', 'name_de'),\n ('semester', 'name_en'),\n )\n verbose_name = _(\"course\")\n verbose_name_plural = _(\"courses\")\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n\n # make sure there is a general contribution\n if not self.general_contribution:\n self.contributions.create(contributor=None)\n del self.general_contribution # invalidate cached property\n\n assert self.vote_end_date >= self.vote_end_date\n\n @property\n def is_fully_reviewed(self):\n return not self.open_textanswer_set.exists()\n\n @property\n def is_not_fully_reviewed(self):\n return self.open_textanswer_set.exists()\n\n @property\n def is_in_evaluation_period(self):\n today = datetime.date.today()\n return today >= self.vote_start_date and today <= self.vote_end_date\n\n @property\n def has_enough_questionnaires(self):\n return self.general_contribution and (self.is_single_result or all(self.contributions.annotate(Count('questionnaires')).values_list(\"questionnaires__count\", flat=True)))\n\n def can_user_vote(self, user):\n \"\"\"Returns whether the user is allowed to vote on this course.\"\"\"\n return (self.state == \"in_evaluation\"\n and self.is_in_evaluation_period\n and user in self.participants.all()\n and user not in self.voters.all())\n\n def can_user_see_course(self, user):\n if user.is_staff:\n return True\n if self.is_user_contributor_or_delegate(user):\n return True\n if self.is_private and user not in self.participants.all():\n return False\n return True\n\n def can_user_see_results(self, user):\n if 
user.is_staff:\n return True\n if self.state == 'published':\n if self.is_user_contributor_or_delegate(user):\n return True\n if not self.can_publish_grades:\n return False\n return self.can_user_see_course(user)\n return False\n\n @property\n def is_single_result(self):\n # early return to save some queries\n if self.vote_start_date != self.vote_end_date:\n return False\n\n return self.contributions.get(responsible=True).questionnaires.filter(name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME).exists()\n\n @property\n def can_staff_edit(self):\n return not self.is_archived and self.state in ['new', 'prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']\n\n @property\n def can_staff_delete(self):\n return self.can_staff_edit and not self.num_voters > 0\n\n @property\n def can_staff_approve(self):\n return self.state in ['new', 'prepared', 'editor_approved']\n\n @property\n def can_publish_grades(self):\n from evap.evaluation.tools import get_sum_of_answer_counters\n if self.is_single_result:\n return get_sum_of_answer_counters(self.ratinganswer_counters) > 0\n\n return self.num_voters >= settings.MIN_ANSWER_COUNT and float(self.num_voters) / self.num_participants >= settings.MIN_ANSWER_PERCENTAGE\n\n @transition(field=state, source=['new', 'editor_approved'], target='prepared')\n def ready_for_editors(self):\n pass\n\n @transition(field=state, source='prepared', target='editor_approved')\n def editor_approve(self):\n pass\n\n @transition(field=state, source=['new', 'prepared', 'editor_approved'], target='approved', conditions=[lambda self: self.has_enough_questionnaires])\n def staff_approve(self):\n pass\n\n @transition(field=state, source='prepared', target='new')\n def revert_to_new(self):\n pass\n\n @transition(field=state, source='approved', target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def evaluation_begin(self):\n pass\n\n @transition(field=state, source=['evaluated', 'reviewed'], target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def reopen_evaluation(self):\n pass\n\n @transition(field=state, source='in_evaluation', target='evaluated')\n def evaluation_end(self):\n pass\n\n @transition(field=state, source='evaluated', target='reviewed', conditions=[lambda self: self.is_fully_reviewed])\n def review_finished(self):\n pass\n\n @transition(field=state, source=['new', 'reviewed'], target='reviewed', conditions=[lambda self: self.is_single_result])\n def single_result_created(self):\n pass\n\n @transition(field=state, source='reviewed', target='evaluated', conditions=[lambda self: self.is_not_fully_reviewed])\n def reopen_review(self):\n pass\n\n @transition(field=state, source='reviewed', target='published')\n def publish(self):\n pass\n\n @transition(field=state, source='published', target='reviewed')\n def unpublish(self):\n pass\n\n @property\n def student_state(self):\n return STUDENT_STATES_NAMES[self.state]\n\n @cached_property\n def general_contribution(self):\n try:\n return self.contributions.get(contributor=None)\n except Contribution.DoesNotExist:\n return None\n\n @cached_property\n def num_participants(self):\n if self._participant_count is not None:\n return self._participant_count\n return self.participants.count()\n\n @cached_property\n def num_voters(self):\n if self._voter_count is not None:\n return self._voter_count\n return self.voters.count()\n\n @property\n def due_participants(self):\n return self.participants.exclude(pk__in=self.voters.all())\n\n 
@cached_property\n def responsible_contributor(self):\n return self.contributions.get(responsible=True).contributor\n\n @property\n def days_left_for_evaluation(self):\n return (self.vote_end_date - datetime.date.today()).days\n\n @property\n def days_until_evaluation(self):\n return (self.vote_start_date - datetime.date.today()).days\n\n def is_user_editor_or_delegate(self, user):\n if self.contributions.filter(can_edit=True, contributor=user).exists():\n return True\n else:\n represented_users = user.represented_users.all()\n if self.contributions.filter(can_edit=True, contributor__in=represented_users).exists():\n return True\n\n return False\n\n def is_user_responsible_or_delegate(self, user):\n if self.contributions.filter(responsible=True, contributor=user).exists():\n return True\n else:\n represented_users = user.represented_users.all()\n if self.contributions.filter(responsible=True, contributor__in=represented_users).exists():\n return True\n\n return False\n\n def is_user_contributor(self, user):\n return self.contributions.filter(contributor=user).exists()\n\n def is_user_contributor_or_delegate(self, user):\n if self.is_user_contributor(user):\n return True\n else:\n represented_users = user.represented_users.all()\n if self.contributions.filter(contributor__in=represented_users).exists():\n return True\n return False\n\n def is_user_editor(self, user):\n return self.contributions.filter(contributor=user, can_edit=True).exists()\n\n def warnings(self):\n result = []\n if self.state in ['new', 'prepared', 'editor_approved'] and not self.has_enough_questionnaires:\n result.append(_(\"Not enough questionnaires assigned\"))\n if self.state in ['in_evaluation', 'evaluated', 'reviewed', 'published'] and not self.can_publish_grades:\n result.append(_(\"Not enough participants to publish results\"))\n return result\n\n @property\n def textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return TextAnswer.objects.filter(contribution__course=self)\n\n @cached_property\n def num_textanswers(self):\n return self.textanswer_set.count()\n\n @property\n def open_textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return self.textanswer_set.filter(state=TextAnswer.NOT_REVIEWED)\n\n @property\n def reviewed_textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return self.textanswer_set.exclude(state=TextAnswer.NOT_REVIEWED)\n\n @cached_property\n def num_reviewed_textanswers(self):\n return self.reviewed_textanswer_set.count()\n\n @property\n def ratinganswer_counters(self):\n \"\"\"Pseudo relationship to all rating answers for this course\"\"\"\n return RatingAnswerCounter.objects.filter(contribution__course=self)\n\n def _archive(self):\n \"\"\"Should be called only via Semester.archive\"\"\"\n if not self.is_archiveable:\n raise NotArchiveable()\n self._participant_count = self.num_participants\n self._voter_count = self.num_voters\n self.save()\n\n @property\n def is_archived(self):\n semester_is_archived = self.semester.is_archived\n if semester_is_archived:\n assert self._participant_count is not None and self._voter_count is not None\n return semester_is_archived\n\n @property\n def is_archiveable(self):\n return not self.is_archived and self.state in [\"new\", \"published\"]\n\n def was_evaluated(self, request):\n self.course_evaluated.send(sender=self.__class__, request=request, semester=self.semester)\n\n @property\n def final_grade_documents(self):\n from 
evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.FINAL_GRADES)\n\n @property\n def midterm_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.exclude(type=GradeDocument.FINAL_GRADES)\n\n @property\n def grades_activated(self):\n from evap.grades.tools import are_grades_activated\n return are_grades_activated(self.semester)\n\n @classmethod\n def update_courses(cls):\n logger.info(\"update_courses called. Processing courses now.\")\n from evap.evaluation.tools import send_publish_notifications\n today = datetime.date.today()\n\n courses_new_in_evaluation = []\n evaluation_results_courses = []\n\n for course in cls.objects.all():\n try:\n if course.state == \"approved\" and course.vote_start_date <= today:\n course.evaluation_begin()\n course.save()\n courses_new_in_evaluation.append(course)\n elif course.state == \"in_evaluation\" and course.vote_end_date < today:\n course.evaluation_end()\n if course.is_fully_reviewed:\n course.review_finished()\n if not course.is_graded or course.final_grade_documents.exists() or course.gets_no_grade_documents:\n course.publish()\n evaluation_results_courses.append(course)\n course.save()\n except Exception:\n logger.exception('An error occured when updating the state of course \"{}\" (id {}).'.format(course, course.id))\n\n EmailTemplate.send_evaluation_started_notifications(courses_new_in_evaluation)\n send_publish_notifications(evaluation_results_courses)\n logger.info(\"update_courses finished.\")\n\n\n@receiver(post_transition, sender=Course)\ndef log_state_transition(sender, **kwargs):\n course = kwargs['instance']\n transition_name = kwargs['name']\n source_state = kwargs['source']\n target_state = kwargs['target']\n logger.info('Course \"{}\" (id {}) moved from state \"{}\" to state \"{}\", caused by transition \"{}\".'.format(course, course.id, source_state, target_state, transition_name))\n\n\nclass Contribution(models.Model):\n \"\"\"A contributor who is assigned to a course and his questionnaires.\"\"\"\n\n OWN_COMMENTS = 'OWN'\n COURSE_COMMENTS = 'COURSE'\n ALL_COMMENTS = 'ALL'\n COMMENT_VISIBILITY_CHOICES = (\n (OWN_COMMENTS, _('Own')),\n (COURSE_COMMENTS, _('Course')),\n (ALL_COMMENTS, _('All')),\n )\n IS_CONTRIBUTOR = 'CONTRIBUTOR'\n IS_EDITOR = 'EDITOR'\n IS_RESPONSIBLE = 'RESPONSIBLE'\n RESPONSIBILITY_CHOICES = (\n (IS_CONTRIBUTOR, _('Contributor')),\n (IS_EDITOR, _('Editor')),\n (IS_RESPONSIBLE, _('Responsible')),\n )\n\n course = models.ForeignKey(Course, models.CASCADE, verbose_name=_(\"course\"), related_name='contributions')\n contributor = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT, verbose_name=_(\"contributor\"), blank=True, null=True, related_name='contributions')\n questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_(\"questionnaires\"), blank=True, related_name=\"contributions\")\n responsible = models.BooleanField(verbose_name=_(\"responsible\"), default=False)\n can_edit = models.BooleanField(verbose_name=_(\"can edit\"), default=False)\n comment_visibility = models.CharField(max_length=10, choices=COMMENT_VISIBILITY_CHOICES, verbose_name=_('comment visibility'), default=OWN_COMMENTS)\n label = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"label\"))\n\n order = models.IntegerField(verbose_name=_(\"contribution order\"), default=-1)\n\n class Meta:\n unique_together = (\n ('course', 'contributor'),\n )\n ordering = ['order', ]\n\n def clean(self):\n # responsible 
contributors can always edit\n if self.responsible:\n self.can_edit = True\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n if self.responsible and not self.course.is_single_result:\n assert self.can_edit and self.comment_visibility == self.ALL_COMMENTS\n\n @property\n def is_general(self):\n return self.contributor is None\n\n\nclass Question(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A question including a type.\"\"\"\n\n QUESTION_TYPES = (\n (\"T\", _(\"Text Question\")),\n (\"L\", _(\"Likert Question\")),\n (\"G\", _(\"Grade Question\")),\n )\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n questionnaire = models.ForeignKey(Questionnaire, models.CASCADE)\n text_de = models.TextField(verbose_name=_(\"question text (german)\"))\n text_en = models.TextField(verbose_name=_(\"question text (english)\"))\n type = models.CharField(max_length=1, choices=QUESTION_TYPES, verbose_name=_(\"question type\"))\n\n text = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n @property\n def answer_class(self):\n if self.is_text_question:\n return TextAnswer\n elif self.is_likert_question:\n return RatingAnswerCounter\n elif self.is_grade_question:\n return RatingAnswerCounter\n else:\n raise Exception(\"Unknown answer type: %r\" % self.type)\n\n @property\n def is_likert_question(self):\n return self.type == \"L\"\n\n @property\n def is_text_question(self):\n return self.type == \"T\"\n\n @property\n def is_grade_question(self):\n return self.type == \"G\"\n\n @property\n def is_rating_question(self):\n return self.is_grade_question or self.is_likert_question\n\n\nclass Answer(models.Model):\n \"\"\"An abstract answer to a question. For anonymity purposes, the answering\n user ist not stored in the object. Concrete subclasses are `RatingAnswerCounter`,\n and `TextAnswer`.\"\"\"\n\n question = models.ForeignKey(Question, models.PROTECT)\n contribution = models.ForeignKey(Contribution, models.PROTECT, related_name=\"%(class)s_set\")\n\n class Meta:\n abstract = True\n verbose_name = _(\"answer\")\n verbose_name_plural = _(\"answers\")\n\n\nclass RatingAnswerCounter(Answer):\n \"\"\"A rating answer counter to a question. 
A lower answer is better or indicates more agreement.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(\"answer\"))\n count = models.IntegerField(verbose_name=_(\"count\"), default=0)\n\n class Meta:\n unique_together = (\n ('question', 'contribution', 'answer'),\n )\n verbose_name = _(\"rating answer\")\n verbose_name_plural = _(\"rating answers\")\n\n def add_vote(self):\n self.count += 1\n\n\nclass TextAnswer(Answer):\n \"\"\"A free-form text answer to a question (usually a comment about a course\n or a contributor).\"\"\"\n\n reviewed_answer = models.TextField(verbose_name=_(\"reviewed answer\"), blank=True, null=True)\n original_answer = models.TextField(verbose_name=_(\"original answer\"), blank=True)\n\n HIDDEN = 'HI'\n PUBLISHED = 'PU'\n PRIVATE = 'PR'\n NOT_REVIEWED = 'NR'\n TEXT_ANSWER_STATES = (\n (HIDDEN, _('hidden')),\n (PUBLISHED, _('published')),\n (PRIVATE, _('private')),\n (NOT_REVIEWED, _('not reviewed')),\n )\n state = models.CharField(max_length=2, choices=TEXT_ANSWER_STATES, verbose_name=_('state of answer'), default=NOT_REVIEWED)\n\n class Meta:\n verbose_name = _(\"text answer\")\n verbose_name_plural = _(\"text answers\")\n\n @property\n def is_reviewed(self):\n return self.state != self.NOT_REVIEWED\n\n @property\n def is_hidden(self):\n return self.state == self.HIDDEN\n\n @property\n def is_private(self):\n return self.state == self.PRIVATE\n\n @property\n def is_published(self):\n return self.state == self.PUBLISHED\n\n @property\n def answer(self):\n return self.reviewed_answer or self.original_answer\n\n @answer.setter\n def answer(self, value):\n self.original_answer = value\n self.reviewed_answer = None\n\n def publish(self):\n self.state = self.PUBLISHED\n\n def hide(self):\n self.state = self.HIDDEN\n\n def make_private(self):\n self.state = self.PRIVATE\n\n def unreview(self):\n self.state = self.NOT_REVIEWED\n\n\nclass FaqSection(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Section in the frequently asked questions\"\"\"\n\n order = models.IntegerField(verbose_name=_(\"section order\"), default=-1)\n\n title_de = models.TextField(verbose_name=_(\"section title (german)\"))\n title_en = models.TextField(verbose_name=_(\"section title (english)\"))\n title = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"section\")\n verbose_name_plural = _(\"sections\")\n\n\nclass FaqQuestion(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Question and answer in the frequently asked questions\"\"\"\n\n section = models.ForeignKey(FaqSection, models.CASCADE, related_name=\"questions\")\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n\n question_de = models.TextField(verbose_name=_(\"question (german)\"))\n question_en = models.TextField(verbose_name=_(\"question (english)\"))\n question = Translate\n\n answer_de = models.TextField(verbose_name=_(\"answer (german)\"))\n answer_en = models.TextField(verbose_name=_(\"answer (german)\"))\n answer = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n\nclass UserProfileManager(BaseUserManager):\n def create_user(self, username, password=None, email=None, first_name=None, last_name=None):\n if not username:\n raise ValueError(_('Users must have a username'))\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n first_name=first_name,\n last_name=last_name\n )\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, 
username, password, email=None, first_name=None, last_name=None):\n user = self.create_user(\n username=username,\n password=password,\n email=email,\n first_name=first_name,\n last_name=last_name\n )\n user.is_superuser = True\n user.save()\n user.groups.add(Group.objects.get(name=\"Staff\"))\n return user\n\n\n# taken from http://stackoverflow.com/questions/454436/unique-fields-that-allow-nulls-in-django\n# and https://docs.djangoproject.com/en/1.8/howto/custom-model-fields/#converting-values-to-python-objects\nclass EmailNullField(models.EmailField):\n\n description = \"EmailField that stores NULL but returns ''\"\n\n def from_db_value(self, value, expression, connection, context):\n return value or \"\"\n\n def to_python(self, value): # this is the value right out of the db, or an instance\n return value or \"\"\n\n def get_prep_value(self, value): # catches value right before sending to db\n return value or None\n\n\nclass UserProfile(AbstractBaseUser, PermissionsMixin):\n username = models.CharField(max_length=255, unique=True, verbose_name=_('username'))\n\n # null=True because users created through kerberos logins and certain external users don't have an address.\n email = EmailNullField(max_length=255, unique=True, blank=True, null=True, verbose_name=_('email address'))\n\n title = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"Title\"))\n first_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"first name\"))\n last_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"last name\"))\n\n # delegates of the user, which can also manage their courses\n delegates = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"Delegates\"), related_name=\"represented_users\", blank=True)\n\n # users to which all emails should be sent in cc without giving them delegate rights\n cc_users = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"CC Users\"), related_name=\"ccing_users\", blank=True)\n\n # key for url based login of this user\n MAX_LOGIN_KEY = 2**31 - 1\n\n login_key = models.IntegerField(verbose_name=_(\"Login Key\"), unique=True, blank=True, null=True)\n login_key_valid_until = models.DateField(verbose_name=_(\"Login Key Validity\"), blank=True, null=True)\n\n class Meta:\n ordering = ('last_name', 'first_name', 'username')\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = []\n\n objects = UserProfileManager()\n\n # needed e.g. for compatibility with contrib.auth.admin\n def get_full_name(self):\n return self.full_name\n\n # needed e.g. 
for compatibility with contrib.auth.admin\n def get_short_name(self):\n if self.first_name:\n return self.first_name\n return self.username\n\n @property\n def full_name(self):\n if self.last_name:\n name = self.last_name\n if self.first_name:\n name = self.first_name + \" \" + name\n if self.title:\n name = self.title + \" \" + name\n return name\n else:\n return self.username\n\n def __str__(self):\n return self.full_name\n\n @property\n def is_active(self):\n return True\n\n @cached_property\n def is_staff(self):\n return self.groups.filter(name='Staff').exists()\n\n @cached_property\n def is_grade_publisher(self):\n return self.groups.filter(name='Grade publisher').exists()\n\n @property\n def can_staff_delete(self):\n states_with_votes = [\"in_evaluation\", \"reviewed\", \"evaluated\", \"published\"]\n if any(course.state in states_with_votes and not course.is_archived for course in self.courses_participating_in.all()):\n return False\n if self.is_contributor or self.is_grade_publisher or self.is_staff or self.is_superuser:\n return False\n if any(not user.can_staff_delete() for user in self.represented_users.all()):\n return False\n if any(not user.can_staff_delete() for user in self.ccing_users.all()):\n return False\n return True\n\n @property\n def is_participant(self):\n return self.courses_participating_in.exists()\n\n @property\n def is_student(self):\n \"\"\"\n A UserProfile is not considered to be a student anymore if the\n newest contribution is newer than the newest participation.\n \"\"\"\n if not self.is_participant:\n return False\n\n if not self.is_contributor:\n return True\n\n last_semester_participated = Semester.objects.filter(course__participants=self).order_by(\"-created_at\").first()\n last_semester_contributed = Semester.objects.filter(course__contributions__contributor=self).order_by(\"-created_at\").first()\n\n return last_semester_participated.created_at >= last_semester_contributed.created_at\n\n @property\n def is_contributor(self):\n return self.contributions.exists()\n\n @property\n def is_editor(self):\n return self.contributions.filter(can_edit=True).exists()\n\n @property\n def is_responsible(self):\n # in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it\n return any(contribution.responsible for contribution in self.contributions.all())\n\n @property\n def is_delegate(self):\n return self.represented_users.exists()\n\n @property\n def is_editor_or_delegate(self):\n return self.is_editor or self.is_delegate\n\n @property\n def is_contributor_or_delegate(self):\n return self.is_contributor or self.is_delegate\n\n @property\n def is_external(self):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n if not self.email:\n return True\n return is_external_email(self.email)\n\n @property\n def can_download_grades(self):\n return not self.is_external\n\n @classmethod\n def email_needs_login_key(cls, email):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n return is_external_email(email)\n\n @property\n def needs_login_key(self):\n return UserProfile.email_needs_login_key(self.email)\n\n def generate_login_key(self):\n while True:\n key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)\n if not UserProfile.objects.filter(login_key=key).exists():\n # key not yet used\n self.login_key = key\n break\n self.refresh_login_key()\n\n def refresh_login_key(self):\n self.login_key_valid_until = 
datetime.date.today() + datetime.timedelta(settings.LOGIN_KEY_VALIDITY)\n self.save()\n\n @property\n def login_url(self):\n if not self.needs_login_key:\n return \"\"\n return settings.PAGE_URL + \"?loginkey=\" + str(self.login_key)\n\n def get_sorted_contributions(self):\n return self.contributions.order_by('course__semester__created_at', 'course__name_de')\n\n def get_sorted_courses_participating_in(self):\n return self.courses_participating_in.order_by('semester__created_at', 'name_de')\n\n def get_sorted_courses_voted_for(self):\n return self.courses_voted_for.order_by('semester__created_at', 'name_de')\n\n\ndef validate_template(value):\n \"\"\"Field validator which ensures that the value can be compiled into a\n Django Template.\"\"\"\n try:\n Template(value)\n except (TemplateSyntaxError, TemplateEncodingError) as e:\n raise ValidationError(str(e))\n\n\nclass EmailTemplate(models.Model):\n name = models.CharField(max_length=1024, unique=True, verbose_name=_(\"Name\"))\n\n subject = models.CharField(max_length=1024, verbose_name=_(\"Subject\"), validators=[validate_template])\n body = models.TextField(verbose_name=_(\"Body\"), validators=[validate_template])\n\n EDITOR_REVIEW_NOTICE = \"Editor Review Notice\"\n STUDENT_REMINDER = \"Student Reminder\"\n PUBLISHING_NOTICE = \"Publishing Notice\"\n LOGIN_KEY_CREATED = \"Login Key Created\"\n EVALUATION_STARTED = \"Evaluation Started\"\n\n ALL_PARTICIPANTS = 'all_participants'\n DUE_PARTICIPANTS = 'due_participants'\n RESPONSIBLE = 'responsible'\n EDITORS = 'editors'\n CONTRIBUTORS = 'contributors'\n\n EMAIL_RECIPIENTS = (\n (ALL_PARTICIPANTS, _('all participants')),\n (DUE_PARTICIPANTS, _('due participants')),\n (RESPONSIBLE, _('responsible person')),\n (EDITORS, _('all editors')),\n (CONTRIBUTORS, _('all contributors'))\n )\n\n @classmethod\n def recipient_list_for_course(cls, course, recipient_groups, filter_users_in_cc):\n recipients = []\n\n if cls.CONTRIBUTORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course)\n elif cls.EDITORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course, contributions__can_edit=True)\n elif cls.RESPONSIBLE in recipient_groups:\n recipients += [course.responsible_contributor]\n\n if cls.ALL_PARTICIPANTS in recipient_groups:\n recipients += course.participants.all()\n elif cls.DUE_PARTICIPANTS in recipient_groups:\n recipients += course.due_participants\n\n if filter_users_in_cc:\n # remove delegates and CC users of recipients from the recipient list\n # so they won't get the exact same email twice\n users_excluded = UserProfile.objects.filter(Q(represented_users__in=recipients) | Q(ccing_users__in=recipients))\n # but do so only if they have no delegates/cc_users, because otherwise\n # those won't get the email at all. 
consequently, some \"edge case users\"\n # will get the email twice, but there is no satisfying way around that.\n users_excluded = users_excluded.filter(delegates=None, cc_users=None)\n\n recipients = list(set(recipients) - set(users_excluded))\n\n return recipients\n\n @classmethod\n def __render_string(cls, text, dictionary):\n return Template(text).render(Context(dictionary, autoescape=False))\n\n @classmethod\n def send_to_users_in_courses(cls, template, courses, recipient_groups, use_cc):\n user_course_map = {}\n for course in courses:\n recipients = cls.recipient_list_for_course(course, recipient_groups, filter_users_in_cc=use_cc)\n for user in recipients:\n user_course_map.setdefault(user, []).append(course)\n\n for user, courses in user_course_map.items():\n subject_params = {}\n body_params = {'user': user, 'courses': courses}\n cls.__send_to_user(user, template, subject_params, body_params, use_cc=use_cc)\n\n @classmethod\n def __send_to_user(cls, user, template, subject_params, body_params, use_cc):\n if not user.email:\n warning_message = \"{} has no email address defined. Could not send email.\".format(user.username)\n logger.warning(warning_message)\n messages.warning(_(warning_message))\n return\n\n if use_cc:\n cc_users = set(user.delegates.all() | user.cc_users.all())\n cc_addresses = [p.email for p in cc_users if p.email]\n else:\n cc_addresses = []\n\n send_separate_login_url = False\n body_params['login_url'] = \"\"\n if user.needs_login_key:\n user.generate_login_key()\n if not cc_addresses:\n body_params['login_url'] = user.login_url\n else:\n send_separate_login_url = True\n\n subject = cls.__render_string(template.subject, subject_params)\n body = cls.__render_string(template.body, body_params)\n\n mail = EmailMessage(\n subject=subject,\n body=body,\n to=[user.email],\n cc=cc_addresses,\n bcc=[a[1] for a in settings.MANAGERS],\n headers={'Reply-To': settings.REPLY_TO_EMAIL})\n\n try:\n mail.send(False)\n logger.info(('Sent email \"{}\" to {}.').format(subject, user.username))\n if send_separate_login_url:\n cls.send_login_url_to_user(user)\n except Exception:\n logger.exception('An exception occurred when sending the following email to user \"{}\":\\n{}\\n'.format(user.username, mail.message()))\n\n @classmethod\n def send_reminder_to_user(cls, user, first_due_in_days, due_courses):\n template = cls.objects.get(name=cls.STUDENT_REMINDER)\n subject_params = {'user': user, 'first_due_in_days': first_due_in_days}\n body_params = {'user': user, 'first_due_in_days': first_due_in_days, 'due_courses': due_courses}\n\n cls.__send_to_user(user, template, subject_params, body_params, use_cc=False)\n\n @classmethod\n def send_login_url_to_user(cls, user):\n template = cls.objects.get(name=cls.LOGIN_KEY_CREATED)\n subject_params = {}\n body_params = {'user': user, 'login_url': user.login_url}\n\n cls.__send_to_user(user, template, subject_params, body_params, use_cc=False)\n logger.info(('Sent login url to {}.').format(user.username))\n\n @classmethod\n def send_publish_notifications_to_user(cls, user, courses):\n template = cls.objects.get(name=cls.PUBLISHING_NOTICE)\n subject_params = {}\n body_params = {'user': user, 'courses': courses}\n\n cls.__send_to_user(user, template, subject_params, body_params, use_cc=True)\n\n @classmethod\n def send_review_notifications(cls, courses):\n template = cls.objects.get(name=cls.EDITOR_REVIEW_NOTICE)\n cls.send_to_users_in_courses(template, courses, [cls.EDITORS], use_cc=True)\n\n @classmethod\n def 
send_evaluation_started_notifications(cls, courses):\n template = cls.objects.get(name=cls.EVALUATION_STARTED)\n cls.send_to_users_in_courses(template, courses, [cls.ALL_PARTICIPANTS], use_cc=False)\n", "path": "evap/evaluation/models.py" } ]
[ { "content": "import datetime\nimport random\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin, Group\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.db import models, transaction\nfrom django.db.models import Count, Q\nfrom django.dispatch import Signal, receiver\nfrom django.template.base import TemplateSyntaxError, TemplateEncodingError\nfrom django.template import Context, Template\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.functional import cached_property\n\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\n\n# see evaluation.meta for the use of Translate in this file\nfrom evap.evaluation.meta import LocalizeModelBase, Translate\n\n\nlogger = logging.getLogger(__name__)\n\n\n# for converting state into student_state\nSTUDENT_STATES_NAMES = {\n 'new': 'upcoming',\n 'prepared': 'upcoming',\n 'editor_approved': 'upcoming',\n 'approved': 'upcoming',\n 'in_evaluation': 'in_evaluation',\n 'evaluated': 'evaluationFinished',\n 'reviewed': 'evaluationFinished',\n 'published': 'published'\n}\n\n\nclass NotArchiveable(Exception):\n \"\"\"An attempt has been made to archive something that is not archiveable.\"\"\"\n pass\n\n\nclass Semester(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Represents a semester, e.g. the winter term of 2011/2012.\"\"\"\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n is_archived = models.BooleanField(default=False, verbose_name=_(\"is archived\"))\n\n created_at = models.DateField(verbose_name=_(\"created at\"), auto_now_add=True)\n\n class Meta:\n ordering = ('-created_at', 'name_de')\n verbose_name = _(\"semester\")\n verbose_name_plural = _(\"semesters\")\n\n def __str__(self):\n return self.name\n\n @property\n def can_staff_delete(self):\n return all(course.can_staff_delete for course in self.course_set.all())\n\n @property\n def is_archiveable(self):\n return not self.is_archived and all(course.is_archiveable for course in self.course_set.all())\n\n @transaction.atomic\n def archive(self):\n if not self.is_archiveable:\n raise NotArchiveable()\n for course in self.course_set.all():\n course._archive()\n self.is_archived = True\n self.save()\n\n @classmethod\n def get_all_with_published_courses(cls):\n return cls.objects.filter(course__state=\"published\").distinct()\n\n @classmethod\n def active_semester(cls):\n return cls.objects.order_by(\"created_at\").last()\n\n\nclass Questionnaire(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A named collection of questions.\"\"\"\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n description_de = models.TextField(verbose_name=_(\"description (german)\"), blank=True, null=True)\n description_en = models.TextField(verbose_name=_(\"description (english)\"), blank=True, null=True)\n description = Translate\n\n public_name_de = models.CharField(max_length=1024, verbose_name=_(\"display name (german)\"))\n public_name_en = models.CharField(max_length=1024, verbose_name=_(\"display name (english)\"))\n public_name = Translate\n\n teaser_de = 
models.TextField(verbose_name=_(\"teaser (german)\"), blank=True, null=True)\n teaser_en = models.TextField(verbose_name=_(\"teaser (english)\"), blank=True, null=True)\n teaser = Translate\n\n index = models.IntegerField(verbose_name=_(\"ordering index\"), default=0)\n\n is_for_contributors = models.BooleanField(verbose_name=_(\"is for contributors\"), default=False)\n staff_only = models.BooleanField(verbose_name=_(\"display for staff only\"), default=False)\n obsolete = models.BooleanField(verbose_name=_(\"obsolete\"), default=False)\n\n class Meta:\n ordering = ('is_for_contributors', 'index', 'name_de')\n verbose_name = _(\"questionnaire\")\n verbose_name_plural = _(\"questionnaires\")\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return (self.is_for_contributors, self.index) < (other.is_for_contributors, other.index)\n\n def __gt__(self, other):\n return (self.is_for_contributors, self.index) > (other.is_for_contributors, other.index)\n\n @property\n def can_staff_edit(self):\n return not self.contributions.exists()\n\n @property\n def can_staff_delete(self):\n return self.can_staff_edit\n\n @property\n def text_questions(self):\n return [question for question in self.question_set.all() if question.is_text_question]\n\n @property\n def rating_questions(self):\n return [question for question in self.question_set.all() if question.is_rating_question]\n\n SINGLE_RESULT_QUESTIONNAIRE_NAME = \"Single result\"\n\n @classmethod\n def get_single_result_questionnaire(cls):\n return cls.objects.get(name_en=cls.SINGLE_RESULT_QUESTIONNAIRE_NAME)\n\n\nclass Degree(models.Model, metaclass=LocalizeModelBase):\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n order = models.IntegerField(verbose_name=_(\"degree order\"), default=-1)\n\n class Meta:\n ordering = ['order', ]\n\n def __str__(self):\n return self.name\n\n def can_staff_delete(self):\n if self.pk is None:\n return True\n return not self.courses.all().exists()\n\n\nclass CourseType(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Model for the type of a course, e.g. a lecture\"\"\"\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n class Meta:\n ordering = ['name_de', ]\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return self.name_de < other.name_de\n\n def can_staff_delete(self):\n if not self.pk:\n return True\n return not self.courses.all().exists()\n\n\nclass Course(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Models a single course, e.g. the Math 101 course of 2002.\"\"\"\n\n state = FSMField(default='new', protected=True)\n\n semester = models.ForeignKey(Semester, models.PROTECT, verbose_name=_(\"semester\"))\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"))\n name = Translate\n\n # type of course: lecture, seminar, project\n type = models.ForeignKey(CourseType, models.PROTECT, verbose_name=_(\"course type\"), related_name=\"courses\")\n\n # e.g. 
Bachelor, Master\n degrees = models.ManyToManyField(Degree, verbose_name=_(\"degrees\"), related_name=\"courses\")\n\n # default is True as that's the more restrictive option\n is_graded = models.BooleanField(verbose_name=_(\"is graded\"), default=True)\n\n # defines whether results can only be seen by contributors and participants\n is_private = models.BooleanField(verbose_name=_(\"is private\"), default=False)\n\n # graders can set this to True, then the course will be handled as if final grades have already been uploaded\n gets_no_grade_documents = models.BooleanField(verbose_name=_(\"gets no grade documents\"), default=False)\n\n # whether participants must vote to qualify for reward points\n is_required_for_reward = models.BooleanField(verbose_name=_(\"is required for reward\"), default=True)\n\n # students that are allowed to vote\n participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"participants\"), blank=True, related_name='courses_participating_in')\n _participant_count = models.IntegerField(verbose_name=_(\"participant count\"), blank=True, null=True, default=None)\n\n # students that already voted\n voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"voters\"), blank=True, related_name='courses_voted_for')\n _voter_count = models.IntegerField(verbose_name=_(\"voter count\"), blank=True, null=True, default=None)\n\n # when the evaluation takes place\n vote_start_date = models.DateField(verbose_name=_(\"first day of evaluation\"))\n vote_end_date = models.DateField(verbose_name=_(\"last day of evaluation\"))\n\n # who last modified this course\n last_modified_time = models.DateTimeField(auto_now=True)\n last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, null=True, blank=True, related_name=\"course_last_modified_user+\")\n\n course_evaluated = Signal(providing_args=['request', 'semester'])\n\n class Meta:\n ordering = ('name_de',)\n unique_together = (\n ('semester', 'name_de'),\n ('semester', 'name_en'),\n )\n verbose_name = _(\"course\")\n verbose_name_plural = _(\"courses\")\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n\n # make sure there is a general contribution\n if not self.general_contribution:\n self.contributions.create(contributor=None)\n del self.general_contribution # invalidate cached property\n\n assert self.vote_end_date >= self.vote_end_date\n\n @property\n def is_fully_reviewed(self):\n return not self.open_textanswer_set.exists()\n\n @property\n def is_not_fully_reviewed(self):\n return self.open_textanswer_set.exists()\n\n @property\n def is_in_evaluation_period(self):\n today = datetime.date.today()\n return today >= self.vote_start_date and today <= self.vote_end_date\n\n @property\n def has_enough_questionnaires(self):\n return self.general_contribution and (self.is_single_result or all(self.contributions.annotate(Count('questionnaires')).values_list(\"questionnaires__count\", flat=True)))\n\n def can_user_vote(self, user):\n \"\"\"Returns whether the user is allowed to vote on this course.\"\"\"\n return (self.state == \"in_evaluation\"\n and self.is_in_evaluation_period\n and user in self.participants.all()\n and user not in self.voters.all())\n\n def can_user_see_course(self, user):\n if user.is_staff:\n return True\n if self.is_user_contributor_or_delegate(user):\n return True\n if self.is_private and user not in self.participants.all():\n return False\n return True\n\n def can_user_see_results(self, user):\n if 
user.is_staff:\n return True\n if self.state == 'published':\n if self.is_user_contributor_or_delegate(user):\n return True\n if not self.can_publish_grades:\n return False\n return self.can_user_see_course(user)\n return False\n\n @property\n def is_single_result(self):\n # early return to save some queries\n if self.vote_start_date != self.vote_end_date:\n return False\n\n return self.contributions.get(responsible=True).questionnaires.filter(name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME).exists()\n\n @property\n def can_staff_edit(self):\n return not self.is_archived and self.state in ['new', 'prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']\n\n @property\n def can_staff_delete(self):\n return self.can_staff_edit and not self.num_voters > 0\n\n @property\n def can_staff_approve(self):\n return self.state in ['new', 'prepared', 'editor_approved']\n\n @property\n def can_publish_grades(self):\n from evap.evaluation.tools import get_sum_of_answer_counters\n if self.is_single_result:\n return get_sum_of_answer_counters(self.ratinganswer_counters) > 0\n\n return self.num_voters >= settings.MIN_ANSWER_COUNT and float(self.num_voters) / self.num_participants >= settings.MIN_ANSWER_PERCENTAGE\n\n @transition(field=state, source=['new', 'editor_approved'], target='prepared')\n def ready_for_editors(self):\n pass\n\n @transition(field=state, source='prepared', target='editor_approved')\n def editor_approve(self):\n pass\n\n @transition(field=state, source=['new', 'prepared', 'editor_approved'], target='approved', conditions=[lambda self: self.has_enough_questionnaires])\n def staff_approve(self):\n pass\n\n @transition(field=state, source=['prepared', 'approved'], target='new')\n def revert_to_new(self):\n pass\n\n @transition(field=state, source='approved', target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def evaluation_begin(self):\n pass\n\n @transition(field=state, source=['evaluated', 'reviewed'], target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def reopen_evaluation(self):\n pass\n\n @transition(field=state, source='in_evaluation', target='evaluated')\n def evaluation_end(self):\n pass\n\n @transition(field=state, source='evaluated', target='reviewed', conditions=[lambda self: self.is_fully_reviewed])\n def review_finished(self):\n pass\n\n @transition(field=state, source=['new', 'reviewed'], target='reviewed', conditions=[lambda self: self.is_single_result])\n def single_result_created(self):\n pass\n\n @transition(field=state, source='reviewed', target='evaluated', conditions=[lambda self: self.is_not_fully_reviewed])\n def reopen_review(self):\n pass\n\n @transition(field=state, source='reviewed', target='published')\n def publish(self):\n pass\n\n @transition(field=state, source='published', target='reviewed')\n def unpublish(self):\n pass\n\n @property\n def student_state(self):\n return STUDENT_STATES_NAMES[self.state]\n\n @cached_property\n def general_contribution(self):\n try:\n return self.contributions.get(contributor=None)\n except Contribution.DoesNotExist:\n return None\n\n @cached_property\n def num_participants(self):\n if self._participant_count is not None:\n return self._participant_count\n return self.participants.count()\n\n @cached_property\n def num_voters(self):\n if self._voter_count is not None:\n return self._voter_count\n return self.voters.count()\n\n @property\n def due_participants(self):\n return self.participants.exclude(pk__in=self.voters.all())\n\n 
@cached_property\n def responsible_contributor(self):\n return self.contributions.get(responsible=True).contributor\n\n @property\n def days_left_for_evaluation(self):\n return (self.vote_end_date - datetime.date.today()).days\n\n @property\n def days_until_evaluation(self):\n return (self.vote_start_date - datetime.date.today()).days\n\n def is_user_editor_or_delegate(self, user):\n if self.contributions.filter(can_edit=True, contributor=user).exists():\n return True\n else:\n represented_users = user.represented_users.all()\n if self.contributions.filter(can_edit=True, contributor__in=represented_users).exists():\n return True\n\n return False\n\n def is_user_responsible_or_delegate(self, user):\n if self.contributions.filter(responsible=True, contributor=user).exists():\n return True\n else:\n represented_users = user.represented_users.all()\n if self.contributions.filter(responsible=True, contributor__in=represented_users).exists():\n return True\n\n return False\n\n def is_user_contributor(self, user):\n return self.contributions.filter(contributor=user).exists()\n\n def is_user_contributor_or_delegate(self, user):\n if self.is_user_contributor(user):\n return True\n else:\n represented_users = user.represented_users.all()\n if self.contributions.filter(contributor__in=represented_users).exists():\n return True\n return False\n\n def is_user_editor(self, user):\n return self.contributions.filter(contributor=user, can_edit=True).exists()\n\n def warnings(self):\n result = []\n if self.state in ['new', 'prepared', 'editor_approved'] and not self.has_enough_questionnaires:\n result.append(_(\"Not enough questionnaires assigned\"))\n if self.state in ['in_evaluation', 'evaluated', 'reviewed', 'published'] and not self.can_publish_grades:\n result.append(_(\"Not enough participants to publish results\"))\n return result\n\n @property\n def textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return TextAnswer.objects.filter(contribution__course=self)\n\n @cached_property\n def num_textanswers(self):\n return self.textanswer_set.count()\n\n @property\n def open_textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return self.textanswer_set.filter(state=TextAnswer.NOT_REVIEWED)\n\n @property\n def reviewed_textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return self.textanswer_set.exclude(state=TextAnswer.NOT_REVIEWED)\n\n @cached_property\n def num_reviewed_textanswers(self):\n return self.reviewed_textanswer_set.count()\n\n @property\n def ratinganswer_counters(self):\n \"\"\"Pseudo relationship to all rating answers for this course\"\"\"\n return RatingAnswerCounter.objects.filter(contribution__course=self)\n\n def _archive(self):\n \"\"\"Should be called only via Semester.archive\"\"\"\n if not self.is_archiveable:\n raise NotArchiveable()\n self._participant_count = self.num_participants\n self._voter_count = self.num_voters\n self.save()\n\n @property\n def is_archived(self):\n semester_is_archived = self.semester.is_archived\n if semester_is_archived:\n assert self._participant_count is not None and self._voter_count is not None\n return semester_is_archived\n\n @property\n def is_archiveable(self):\n return not self.is_archived and self.state in [\"new\", \"published\"]\n\n def was_evaluated(self, request):\n self.course_evaluated.send(sender=self.__class__, request=request, semester=self.semester)\n\n @property\n def final_grade_documents(self):\n from 
evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.FINAL_GRADES)\n\n @property\n def midterm_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.exclude(type=GradeDocument.FINAL_GRADES)\n\n @property\n def grades_activated(self):\n from evap.grades.tools import are_grades_activated\n return are_grades_activated(self.semester)\n\n @classmethod\n def update_courses(cls):\n logger.info(\"update_courses called. Processing courses now.\")\n from evap.evaluation.tools import send_publish_notifications\n today = datetime.date.today()\n\n courses_new_in_evaluation = []\n evaluation_results_courses = []\n\n for course in cls.objects.all():\n try:\n if course.state == \"approved\" and course.vote_start_date <= today:\n course.evaluation_begin()\n course.save()\n courses_new_in_evaluation.append(course)\n elif course.state == \"in_evaluation\" and course.vote_end_date < today:\n course.evaluation_end()\n if course.is_fully_reviewed:\n course.review_finished()\n if not course.is_graded or course.final_grade_documents.exists() or course.gets_no_grade_documents:\n course.publish()\n evaluation_results_courses.append(course)\n course.save()\n except Exception:\n logger.exception('An error occured when updating the state of course \"{}\" (id {}).'.format(course, course.id))\n\n EmailTemplate.send_evaluation_started_notifications(courses_new_in_evaluation)\n send_publish_notifications(evaluation_results_courses)\n logger.info(\"update_courses finished.\")\n\n\n@receiver(post_transition, sender=Course)\ndef log_state_transition(sender, **kwargs):\n course = kwargs['instance']\n transition_name = kwargs['name']\n source_state = kwargs['source']\n target_state = kwargs['target']\n logger.info('Course \"{}\" (id {}) moved from state \"{}\" to state \"{}\", caused by transition \"{}\".'.format(course, course.id, source_state, target_state, transition_name))\n\n\nclass Contribution(models.Model):\n \"\"\"A contributor who is assigned to a course and his questionnaires.\"\"\"\n\n OWN_COMMENTS = 'OWN'\n COURSE_COMMENTS = 'COURSE'\n ALL_COMMENTS = 'ALL'\n COMMENT_VISIBILITY_CHOICES = (\n (OWN_COMMENTS, _('Own')),\n (COURSE_COMMENTS, _('Course')),\n (ALL_COMMENTS, _('All')),\n )\n IS_CONTRIBUTOR = 'CONTRIBUTOR'\n IS_EDITOR = 'EDITOR'\n IS_RESPONSIBLE = 'RESPONSIBLE'\n RESPONSIBILITY_CHOICES = (\n (IS_CONTRIBUTOR, _('Contributor')),\n (IS_EDITOR, _('Editor')),\n (IS_RESPONSIBLE, _('Responsible')),\n )\n\n course = models.ForeignKey(Course, models.CASCADE, verbose_name=_(\"course\"), related_name='contributions')\n contributor = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT, verbose_name=_(\"contributor\"), blank=True, null=True, related_name='contributions')\n questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_(\"questionnaires\"), blank=True, related_name=\"contributions\")\n responsible = models.BooleanField(verbose_name=_(\"responsible\"), default=False)\n can_edit = models.BooleanField(verbose_name=_(\"can edit\"), default=False)\n comment_visibility = models.CharField(max_length=10, choices=COMMENT_VISIBILITY_CHOICES, verbose_name=_('comment visibility'), default=OWN_COMMENTS)\n label = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"label\"))\n\n order = models.IntegerField(verbose_name=_(\"contribution order\"), default=-1)\n\n class Meta:\n unique_together = (\n ('course', 'contributor'),\n )\n ordering = ['order', ]\n\n def clean(self):\n # responsible 
contributors can always edit\n if self.responsible:\n self.can_edit = True\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n if self.responsible and not self.course.is_single_result:\n assert self.can_edit and self.comment_visibility == self.ALL_COMMENTS\n\n @property\n def is_general(self):\n return self.contributor is None\n\n\nclass Question(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A question including a type.\"\"\"\n\n QUESTION_TYPES = (\n (\"T\", _(\"Text Question\")),\n (\"L\", _(\"Likert Question\")),\n (\"G\", _(\"Grade Question\")),\n )\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n questionnaire = models.ForeignKey(Questionnaire, models.CASCADE)\n text_de = models.TextField(verbose_name=_(\"question text (german)\"))\n text_en = models.TextField(verbose_name=_(\"question text (english)\"))\n type = models.CharField(max_length=1, choices=QUESTION_TYPES, verbose_name=_(\"question type\"))\n\n text = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n @property\n def answer_class(self):\n if self.is_text_question:\n return TextAnswer\n elif self.is_likert_question:\n return RatingAnswerCounter\n elif self.is_grade_question:\n return RatingAnswerCounter\n else:\n raise Exception(\"Unknown answer type: %r\" % self.type)\n\n @property\n def is_likert_question(self):\n return self.type == \"L\"\n\n @property\n def is_text_question(self):\n return self.type == \"T\"\n\n @property\n def is_grade_question(self):\n return self.type == \"G\"\n\n @property\n def is_rating_question(self):\n return self.is_grade_question or self.is_likert_question\n\n\nclass Answer(models.Model):\n \"\"\"An abstract answer to a question. For anonymity purposes, the answering\n user ist not stored in the object. Concrete subclasses are `RatingAnswerCounter`,\n and `TextAnswer`.\"\"\"\n\n question = models.ForeignKey(Question, models.PROTECT)\n contribution = models.ForeignKey(Contribution, models.PROTECT, related_name=\"%(class)s_set\")\n\n class Meta:\n abstract = True\n verbose_name = _(\"answer\")\n verbose_name_plural = _(\"answers\")\n\n\nclass RatingAnswerCounter(Answer):\n \"\"\"A rating answer counter to a question. 
A lower answer is better or indicates more agreement.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(\"answer\"))\n count = models.IntegerField(verbose_name=_(\"count\"), default=0)\n\n class Meta:\n unique_together = (\n ('question', 'contribution', 'answer'),\n )\n verbose_name = _(\"rating answer\")\n verbose_name_plural = _(\"rating answers\")\n\n def add_vote(self):\n self.count += 1\n\n\nclass TextAnswer(Answer):\n \"\"\"A free-form text answer to a question (usually a comment about a course\n or a contributor).\"\"\"\n\n reviewed_answer = models.TextField(verbose_name=_(\"reviewed answer\"), blank=True, null=True)\n original_answer = models.TextField(verbose_name=_(\"original answer\"), blank=True)\n\n HIDDEN = 'HI'\n PUBLISHED = 'PU'\n PRIVATE = 'PR'\n NOT_REVIEWED = 'NR'\n TEXT_ANSWER_STATES = (\n (HIDDEN, _('hidden')),\n (PUBLISHED, _('published')),\n (PRIVATE, _('private')),\n (NOT_REVIEWED, _('not reviewed')),\n )\n state = models.CharField(max_length=2, choices=TEXT_ANSWER_STATES, verbose_name=_('state of answer'), default=NOT_REVIEWED)\n\n class Meta:\n verbose_name = _(\"text answer\")\n verbose_name_plural = _(\"text answers\")\n\n @property\n def is_reviewed(self):\n return self.state != self.NOT_REVIEWED\n\n @property\n def is_hidden(self):\n return self.state == self.HIDDEN\n\n @property\n def is_private(self):\n return self.state == self.PRIVATE\n\n @property\n def is_published(self):\n return self.state == self.PUBLISHED\n\n @property\n def answer(self):\n return self.reviewed_answer or self.original_answer\n\n @answer.setter\n def answer(self, value):\n self.original_answer = value\n self.reviewed_answer = None\n\n def publish(self):\n self.state = self.PUBLISHED\n\n def hide(self):\n self.state = self.HIDDEN\n\n def make_private(self):\n self.state = self.PRIVATE\n\n def unreview(self):\n self.state = self.NOT_REVIEWED\n\n\nclass FaqSection(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Section in the frequently asked questions\"\"\"\n\n order = models.IntegerField(verbose_name=_(\"section order\"), default=-1)\n\n title_de = models.TextField(verbose_name=_(\"section title (german)\"))\n title_en = models.TextField(verbose_name=_(\"section title (english)\"))\n title = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"section\")\n verbose_name_plural = _(\"sections\")\n\n\nclass FaqQuestion(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Question and answer in the frequently asked questions\"\"\"\n\n section = models.ForeignKey(FaqSection, models.CASCADE, related_name=\"questions\")\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n\n question_de = models.TextField(verbose_name=_(\"question (german)\"))\n question_en = models.TextField(verbose_name=_(\"question (english)\"))\n question = Translate\n\n answer_de = models.TextField(verbose_name=_(\"answer (german)\"))\n answer_en = models.TextField(verbose_name=_(\"answer (german)\"))\n answer = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n\nclass UserProfileManager(BaseUserManager):\n def create_user(self, username, password=None, email=None, first_name=None, last_name=None):\n if not username:\n raise ValueError(_('Users must have a username'))\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n first_name=first_name,\n last_name=last_name\n )\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, 
username, password, email=None, first_name=None, last_name=None):\n user = self.create_user(\n username=username,\n password=password,\n email=email,\n first_name=first_name,\n last_name=last_name\n )\n user.is_superuser = True\n user.save()\n user.groups.add(Group.objects.get(name=\"Staff\"))\n return user\n\n\n# taken from http://stackoverflow.com/questions/454436/unique-fields-that-allow-nulls-in-django\n# and https://docs.djangoproject.com/en/1.8/howto/custom-model-fields/#converting-values-to-python-objects\nclass EmailNullField(models.EmailField):\n\n description = \"EmailField that stores NULL but returns ''\"\n\n def from_db_value(self, value, expression, connection, context):\n return value or \"\"\n\n def to_python(self, value): # this is the value right out of the db, or an instance\n return value or \"\"\n\n def get_prep_value(self, value): # catches value right before sending to db\n return value or None\n\n\nclass UserProfile(AbstractBaseUser, PermissionsMixin):\n username = models.CharField(max_length=255, unique=True, verbose_name=_('username'))\n\n # null=True because users created through kerberos logins and certain external users don't have an address.\n email = EmailNullField(max_length=255, unique=True, blank=True, null=True, verbose_name=_('email address'))\n\n title = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"Title\"))\n first_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"first name\"))\n last_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"last name\"))\n\n # delegates of the user, which can also manage their courses\n delegates = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"Delegates\"), related_name=\"represented_users\", blank=True)\n\n # users to which all emails should be sent in cc without giving them delegate rights\n cc_users = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"CC Users\"), related_name=\"ccing_users\", blank=True)\n\n # key for url based login of this user\n MAX_LOGIN_KEY = 2**31 - 1\n\n login_key = models.IntegerField(verbose_name=_(\"Login Key\"), unique=True, blank=True, null=True)\n login_key_valid_until = models.DateField(verbose_name=_(\"Login Key Validity\"), blank=True, null=True)\n\n class Meta:\n ordering = ('last_name', 'first_name', 'username')\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = []\n\n objects = UserProfileManager()\n\n # needed e.g. for compatibility with contrib.auth.admin\n def get_full_name(self):\n return self.full_name\n\n # needed e.g. 
for compatibility with contrib.auth.admin\n def get_short_name(self):\n if self.first_name:\n return self.first_name\n return self.username\n\n @property\n def full_name(self):\n if self.last_name:\n name = self.last_name\n if self.first_name:\n name = self.first_name + \" \" + name\n if self.title:\n name = self.title + \" \" + name\n return name\n else:\n return self.username\n\n def __str__(self):\n return self.full_name\n\n @property\n def is_active(self):\n return True\n\n @cached_property\n def is_staff(self):\n return self.groups.filter(name='Staff').exists()\n\n @cached_property\n def is_grade_publisher(self):\n return self.groups.filter(name='Grade publisher').exists()\n\n @property\n def can_staff_delete(self):\n states_with_votes = [\"in_evaluation\", \"reviewed\", \"evaluated\", \"published\"]\n if any(course.state in states_with_votes and not course.is_archived for course in self.courses_participating_in.all()):\n return False\n if self.is_contributor or self.is_grade_publisher or self.is_staff or self.is_superuser:\n return False\n if any(not user.can_staff_delete() for user in self.represented_users.all()):\n return False\n if any(not user.can_staff_delete() for user in self.ccing_users.all()):\n return False\n return True\n\n @property\n def is_participant(self):\n return self.courses_participating_in.exists()\n\n @property\n def is_student(self):\n \"\"\"\n A UserProfile is not considered to be a student anymore if the\n newest contribution is newer than the newest participation.\n \"\"\"\n if not self.is_participant:\n return False\n\n if not self.is_contributor:\n return True\n\n last_semester_participated = Semester.objects.filter(course__participants=self).order_by(\"-created_at\").first()\n last_semester_contributed = Semester.objects.filter(course__contributions__contributor=self).order_by(\"-created_at\").first()\n\n return last_semester_participated.created_at >= last_semester_contributed.created_at\n\n @property\n def is_contributor(self):\n return self.contributions.exists()\n\n @property\n def is_editor(self):\n return self.contributions.filter(can_edit=True).exists()\n\n @property\n def is_responsible(self):\n # in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it\n return any(contribution.responsible for contribution in self.contributions.all())\n\n @property\n def is_delegate(self):\n return self.represented_users.exists()\n\n @property\n def is_editor_or_delegate(self):\n return self.is_editor or self.is_delegate\n\n @property\n def is_contributor_or_delegate(self):\n return self.is_contributor or self.is_delegate\n\n @property\n def is_external(self):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n if not self.email:\n return True\n return is_external_email(self.email)\n\n @property\n def can_download_grades(self):\n return not self.is_external\n\n @classmethod\n def email_needs_login_key(cls, email):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n return is_external_email(email)\n\n @property\n def needs_login_key(self):\n return UserProfile.email_needs_login_key(self.email)\n\n def generate_login_key(self):\n while True:\n key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)\n if not UserProfile.objects.filter(login_key=key).exists():\n # key not yet used\n self.login_key = key\n break\n self.refresh_login_key()\n\n def refresh_login_key(self):\n self.login_key_valid_until = 
datetime.date.today() + datetime.timedelta(settings.LOGIN_KEY_VALIDITY)\n self.save()\n\n @property\n def login_url(self):\n if not self.needs_login_key:\n return \"\"\n return settings.PAGE_URL + \"?loginkey=\" + str(self.login_key)\n\n def get_sorted_contributions(self):\n return self.contributions.order_by('course__semester__created_at', 'course__name_de')\n\n def get_sorted_courses_participating_in(self):\n return self.courses_participating_in.order_by('semester__created_at', 'name_de')\n\n def get_sorted_courses_voted_for(self):\n return self.courses_voted_for.order_by('semester__created_at', 'name_de')\n\n\ndef validate_template(value):\n \"\"\"Field validator which ensures that the value can be compiled into a\n Django Template.\"\"\"\n try:\n Template(value)\n except (TemplateSyntaxError, TemplateEncodingError) as e:\n raise ValidationError(str(e))\n\n\nclass EmailTemplate(models.Model):\n name = models.CharField(max_length=1024, unique=True, verbose_name=_(\"Name\"))\n\n subject = models.CharField(max_length=1024, verbose_name=_(\"Subject\"), validators=[validate_template])\n body = models.TextField(verbose_name=_(\"Body\"), validators=[validate_template])\n\n EDITOR_REVIEW_NOTICE = \"Editor Review Notice\"\n STUDENT_REMINDER = \"Student Reminder\"\n PUBLISHING_NOTICE = \"Publishing Notice\"\n LOGIN_KEY_CREATED = \"Login Key Created\"\n EVALUATION_STARTED = \"Evaluation Started\"\n\n ALL_PARTICIPANTS = 'all_participants'\n DUE_PARTICIPANTS = 'due_participants'\n RESPONSIBLE = 'responsible'\n EDITORS = 'editors'\n CONTRIBUTORS = 'contributors'\n\n EMAIL_RECIPIENTS = (\n (ALL_PARTICIPANTS, _('all participants')),\n (DUE_PARTICIPANTS, _('due participants')),\n (RESPONSIBLE, _('responsible person')),\n (EDITORS, _('all editors')),\n (CONTRIBUTORS, _('all contributors'))\n )\n\n @classmethod\n def recipient_list_for_course(cls, course, recipient_groups, filter_users_in_cc):\n recipients = []\n\n if cls.CONTRIBUTORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course)\n elif cls.EDITORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course, contributions__can_edit=True)\n elif cls.RESPONSIBLE in recipient_groups:\n recipients += [course.responsible_contributor]\n\n if cls.ALL_PARTICIPANTS in recipient_groups:\n recipients += course.participants.all()\n elif cls.DUE_PARTICIPANTS in recipient_groups:\n recipients += course.due_participants\n\n if filter_users_in_cc:\n # remove delegates and CC users of recipients from the recipient list\n # so they won't get the exact same email twice\n users_excluded = UserProfile.objects.filter(Q(represented_users__in=recipients) | Q(ccing_users__in=recipients))\n # but do so only if they have no delegates/cc_users, because otherwise\n # those won't get the email at all. 
consequently, some \"edge case users\"\n # will get the email twice, but there is no satisfying way around that.\n users_excluded = users_excluded.filter(delegates=None, cc_users=None)\n\n recipients = list(set(recipients) - set(users_excluded))\n\n return recipients\n\n @classmethod\n def __render_string(cls, text, dictionary):\n return Template(text).render(Context(dictionary, autoescape=False))\n\n @classmethod\n def send_to_users_in_courses(cls, template, courses, recipient_groups, use_cc):\n user_course_map = {}\n for course in courses:\n recipients = cls.recipient_list_for_course(course, recipient_groups, filter_users_in_cc=use_cc)\n for user in recipients:\n user_course_map.setdefault(user, []).append(course)\n\n for user, courses in user_course_map.items():\n subject_params = {}\n body_params = {'user': user, 'courses': courses}\n cls.__send_to_user(user, template, subject_params, body_params, use_cc=use_cc)\n\n @classmethod\n def __send_to_user(cls, user, template, subject_params, body_params, use_cc):\n if not user.email:\n warning_message = \"{} has no email address defined. Could not send email.\".format(user.username)\n logger.warning(warning_message)\n messages.warning(_(warning_message))\n return\n\n if use_cc:\n cc_users = set(user.delegates.all() | user.cc_users.all())\n cc_addresses = [p.email for p in cc_users if p.email]\n else:\n cc_addresses = []\n\n send_separate_login_url = False\n body_params['login_url'] = \"\"\n if user.needs_login_key:\n user.generate_login_key()\n if not cc_addresses:\n body_params['login_url'] = user.login_url\n else:\n send_separate_login_url = True\n\n subject = cls.__render_string(template.subject, subject_params)\n body = cls.__render_string(template.body, body_params)\n\n mail = EmailMessage(\n subject=subject,\n body=body,\n to=[user.email],\n cc=cc_addresses,\n bcc=[a[1] for a in settings.MANAGERS],\n headers={'Reply-To': settings.REPLY_TO_EMAIL})\n\n try:\n mail.send(False)\n logger.info(('Sent email \"{}\" to {}.').format(subject, user.username))\n if send_separate_login_url:\n cls.send_login_url_to_user(user)\n except Exception:\n logger.exception('An exception occurred when sending the following email to user \"{}\":\\n{}\\n'.format(user.username, mail.message()))\n\n @classmethod\n def send_reminder_to_user(cls, user, first_due_in_days, due_courses):\n template = cls.objects.get(name=cls.STUDENT_REMINDER)\n subject_params = {'user': user, 'first_due_in_days': first_due_in_days}\n body_params = {'user': user, 'first_due_in_days': first_due_in_days, 'due_courses': due_courses}\n\n cls.__send_to_user(user, template, subject_params, body_params, use_cc=False)\n\n @classmethod\n def send_login_url_to_user(cls, user):\n template = cls.objects.get(name=cls.LOGIN_KEY_CREATED)\n subject_params = {}\n body_params = {'user': user, 'login_url': user.login_url}\n\n cls.__send_to_user(user, template, subject_params, body_params, use_cc=False)\n logger.info(('Sent login url to {}.').format(user.username))\n\n @classmethod\n def send_publish_notifications_to_user(cls, user, courses):\n template = cls.objects.get(name=cls.PUBLISHING_NOTICE)\n subject_params = {}\n body_params = {'user': user, 'courses': courses}\n\n cls.__send_to_user(user, template, subject_params, body_params, use_cc=True)\n\n @classmethod\n def send_review_notifications(cls, courses):\n template = cls.objects.get(name=cls.EDITOR_REVIEW_NOTICE)\n cls.send_to_users_in_courses(template, courses, [cls.EDITORS], use_cc=True)\n\n @classmethod\n def 
send_evaluation_started_notifications(cls, courses):\n template = cls.objects.get(name=cls.EVALUATION_STARTED)\n cls.send_to_users_in_courses(template, courses, [cls.ALL_PARTICIPANTS], use_cc=False)\n", "path": "evap/evaluation/models.py" } ]
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py index f684e78511..0365ccd22c 100644 --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -343,7 +343,7 @@ def editor_approve(self): def staff_approve(self): pass - @transition(field=state, source='prepared', target='new') + @transition(field=state, source=['prepared', 'approved'], target='new') def revert_to_new(self): pass diff --git a/evap/evaluation/tests/test_coverage.py b/evap/evaluation/tests/test_coverage.py index cdbfd97668..206be420e3 100644 --- a/evap/evaluation/tests/test_coverage.py +++ b/evap/evaluation/tests/test_coverage.py @@ -175,9 +175,12 @@ def helper_semester_state_views(self, course_ids, old_state, new_state, operatio def test_semester_publish(self): self.helper_semester_state_views([7], "reviewed", "published", "publish") - def test_semester_reset(self): + def test_semester_reset_1(self): self.helper_semester_state_views([2], "prepared", "new", "revertToNew") + def test_semester_reset_2(self): + self.helper_semester_state_views([4], "approved", "new", "revertToNew") + def test_semester_approve_1(self): self.helper_semester_state_views([1], "new", "approved", "approve") diff --git a/evap/staff/templates/staff_semester_view.html b/evap/staff/templates/staff_semester_view.html index b514c08ca9..15916fc96c 100644 --- a/evap/staff/templates/staff_semester_view.html +++ b/evap/staff/templates/staff_semester_view.html @@ -148,9 +148,9 @@ <h3 class="panel-title">{% trans "Actions" %}</h3> {% ifequal state "new" %} <button name="operation" value="prepare" type="submit" class="btn btn-primary" {{ disable_if_archived }}>{% trans "Enable courses for editor review" %}</button> {% endifequal %} - {% ifequal state "prepared" %} + {% if state == 'prepared' or state == 'approved' %} <button name="operation" value="revertToNew" type="submit" class="btn btn-primary" {{ disable_if_archived }}>{% trans "Revert courses to new" %}</button> - {% endifequal %} + {% endif %} {% if state == 'new' or state == 'prepared' or state == 'editor_approved' %} <button name="operation" value="approve" type="submit" class="btn btn-primary" {{ disable_if_archived }}>{% trans "Approve courses" %}</button> {% endif %}
praw-dev__praw-1783
Typo in the Docs Just a tiny typo in the documentation of the `Comment` class: https://github.com/praw-dev/praw/blob/66234a650e67bf0c997ee3d548ae38652795a744/praw/models/reddit/comment.py#L21-L22
[ { "content": "\"\"\"Provide the Comment class.\"\"\"\nfrom typing import TYPE_CHECKING, Any, Dict, Optional, Union\n\nfrom ...const import API_PATH\nfrom ...exceptions import ClientException, InvalidURL\nfrom ...util.cache import cachedproperty\nfrom ..comment_forest import CommentForest\nfrom .base import RedditBase\nfrom .mixins import (\n FullnameMixin,\n InboxableMixin,\n ThingModerationMixin,\n UserContentMixin,\n)\nfrom .redditor import Redditor\n\nif TYPE_CHECKING: # pragma: no cover\n import praw\n\n\nclass Comment(InboxableMixin, UserContentMixin, FullnameMixin, RedditBase):\n \"\"\"A class that represents a reddit comments.\n\n **Typical Attributes**\n\n This table describes attributes that typically belong to objects of this class.\n Since attributes are dynamically provided (see\n :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that\n these attributes will always be present, nor is this list necessarily complete.\n\n ================= ==================================================================\n Attribute Description\n ================= ==================================================================\n ``author`` Provides an instance of :class:`.Redditor`.\n ``body`` The body of the comment, as Markdown.\n ``body_html`` The body of the comment, as HTML.\n ``created_utc`` Time the comment was created, represented in `Unix Time`_.\n ``distinguished`` Whether or not the comment is distinguished.\n ``edited`` Whether or not the comment has been edited.\n ``id`` The ID of the comment.\n ``is_submitter`` Whether or not the comment author is also the author of the\n submission.\n ``link_id`` The submission ID that the comment belongs to.\n ``parent_id`` The ID of the parent comment (prefixed with ``t1_``). If it is a\n top-level comment, this returns the submission ID instead\n (prefixed with ``t3_``).\n ``permalink`` A permalink for the comment. Comment objects from the inbox have a\n ``context`` attribute instead.\n ``replies`` Provides an instance of :class:`.CommentForest`.\n ``saved`` Whether or not the comment is saved.\n ``score`` The number of upvotes for the comment.\n ``stickied`` Whether or not the comment is stickied.\n ``submission`` Provides an instance of :class:`.Submission`. The submission that\n the comment belongs to.\n ``subreddit`` Provides an instance of :class:`.Subreddit`. The subreddit that\n the comment belongs to.\n ``subreddit_id`` The subreddit ID that the comment belongs to.\n ================= ==================================================================\n\n .. 
_unix time: https://en.wikipedia.org/wiki/Unix_time\n\n \"\"\"\n\n MISSING_COMMENT_MESSAGE = \"This comment does not appear to be in the comment tree\"\n STR_FIELD = \"id\"\n\n @staticmethod\n def id_from_url(url: str) -> str:\n \"\"\"Get the ID of a comment from the full URL.\"\"\"\n parts = RedditBase._url_parts(url)\n try:\n comment_index = parts.index(\"comments\")\n except ValueError:\n raise InvalidURL(url)\n\n if len(parts) - 4 != comment_index:\n raise InvalidURL(url)\n return parts[-1]\n\n @property\n def _kind(self) -> str:\n \"\"\"Return the class's kind.\"\"\"\n return self._reddit.config.kinds[\"comment\"]\n\n @property\n def is_root(self) -> bool:\n \"\"\"Return True when the comment is a top level comment.\"\"\"\n parent_type = self.parent_id.split(\"_\", 1)[0]\n return parent_type == self._reddit.config.kinds[\"submission\"]\n\n @cachedproperty\n def mod(self) -> \"praw.models.reddit.comment.CommentModeration\":\n \"\"\"Provide an instance of :class:`.CommentModeration`.\n\n Example usage:\n\n .. code-block:: python\n\n comment = reddit.comment(\"dkk4qjd\")\n comment.mod.approve()\n\n \"\"\"\n return CommentModeration(self)\n\n @property\n def replies(self) -> CommentForest:\n \"\"\"Provide an instance of :class:`.CommentForest`.\n\n This property may return an empty list if the comment has not been refreshed\n with :meth:`.refresh()`\n\n Sort order and reply limit can be set with the ``reply_sort`` and\n ``reply_limit`` attributes before replies are fetched, including any call to\n :meth:`.refresh`:\n\n .. code-block:: python\n\n comment.reply_sort = \"new\"\n comment.refresh()\n replies = comment.replies\n\n .. note::\n\n The appropriate values for ``reply_sort`` include ``confidence``,\n ``controversial``, ``new``, ``old``, ``q&a``, and ``top``.\n\n \"\"\"\n if isinstance(self._replies, list):\n self._replies = CommentForest(self.submission, self._replies)\n return self._replies\n\n @property\n def submission(self) -> \"praw.models.Submission\":\n \"\"\"Return the Submission object this comment belongs to.\"\"\"\n if not self._submission: # Comment not from submission\n self._submission = self._reddit.submission(self._extract_submission_id())\n return self._submission\n\n @submission.setter\n def submission(self, submission: \"praw.models.Submission\"):\n \"\"\"Update the Submission associated with the Comment.\"\"\"\n submission._comments_by_id[self.fullname] = self\n self._submission = submission\n # pylint: disable=not-an-iterable\n for reply in getattr(self, \"replies\", []):\n reply.submission = submission\n\n def __init__(\n self,\n reddit: \"praw.Reddit\",\n id: Optional[str] = None, # pylint: disable=redefined-builtin\n url: Optional[str] = None,\n _data: Optional[Dict[str, Any]] = None,\n ):\n \"\"\"Construct an instance of the Comment object.\"\"\"\n if (id, url, _data).count(None) != 2:\n raise TypeError(\"Exactly one of `id`, `url`, or `_data` must be provided.\")\n fetched = False\n self._replies = []\n self._submission = None\n if id:\n self.id = id\n elif url:\n self.id = self.id_from_url(url)\n else:\n fetched = True\n super().__init__(reddit, _data=_data, _fetched=fetched)\n\n def __setattr__(\n self,\n attribute: str,\n value: Union[str, Redditor, CommentForest, \"praw.models.Subreddit\"],\n ):\n \"\"\"Objectify author, replies, and subreddit.\"\"\"\n if attribute == \"author\":\n value = Redditor.from_data(self._reddit, value)\n elif attribute == \"replies\":\n if value == \"\":\n value = []\n else:\n value = 
self._reddit._objector.objectify(value).children\n attribute = \"_replies\"\n elif attribute == \"subreddit\":\n value = self._reddit.subreddit(value)\n super().__setattr__(attribute, value)\n\n def _fetch_info(self):\n return \"info\", {}, {\"id\": self.fullname}\n\n def _fetch_data(self):\n name, fields, params = self._fetch_info()\n path = API_PATH[name].format(**fields)\n return self._reddit.request(\"GET\", path, params)\n\n def _fetch(self):\n data = self._fetch_data()\n data = data[\"data\"]\n\n if not data[\"children\"]:\n raise ClientException(f\"No data returned for comment {self.fullname}\")\n\n comment_data = data[\"children\"][0][\"data\"]\n other = type(self)(self._reddit, _data=comment_data)\n self.__dict__.update(other.__dict__)\n self._fetched = True\n\n def _extract_submission_id(self):\n if \"context\" in self.__dict__:\n return self.context.rsplit(\"/\", 4)[1]\n return self.link_id.split(\"_\", 1)[1]\n\n def parent(self) -> Union[\"Comment\", \"praw.models.Submission\"]:\n \"\"\"Return the parent of the comment.\n\n The returned parent will be an instance of either :class:`.Comment`, or\n :class:`.Submission`.\n\n If this comment was obtained through a :class:`.Submission`, then its entire\n ancestry should be immediately available, requiring no extra network requests.\n However, if this comment was obtained through other means, e.g.,\n ``reddit.comment(\"COMMENT_ID\")``, or ``reddit.inbox.comment_replies``, then the\n returned parent may be a lazy instance of either :class:`.Comment`, or\n :class:`.Submission`.\n\n Lazy comment example:\n\n .. code-block:: python\n\n comment = reddit.comment(\"cklhv0f\")\n parent = comment.parent()\n # `replies` is empty until the comment is refreshed\n print(parent.replies) # Output: []\n parent.refresh()\n print(parent.replies) # Output is at least: [Comment(id=\"cklhv0f\")]\n\n .. warning::\n\n Successive calls to :meth:`.parent()` may result in a network request per\n call when the comment is not obtained through a :class:`.Submission`. See\n below for an example of how to minimize requests.\n\n If you have a deeply nested comment and wish to most efficiently discover its\n top-most :class:`.Comment` ancestor you can chain successive calls to\n :meth:`.parent()` with calls to :meth:`.refresh()` at every 9 levels. For\n example:\n\n .. code-block:: python\n\n comment = reddit.comment(\"dkk4qjd\")\n ancestor = comment\n refresh_counter = 0\n while not ancestor.is_root:\n ancestor = ancestor.parent()\n if refresh_counter % 9 == 0:\n ancestor.refresh()\n refresh_counter += 1\n print(f\"Top-most Ancestor: {ancestor}\")\n\n The above code should result in 5 network requests to Reddit. Without the calls\n to :meth:`.refresh()` it would make at least 31 network requests.\n\n \"\"\"\n # pylint: disable=no-member\n if self.parent_id == self.submission.fullname:\n return self.submission\n\n if self.parent_id in self.submission._comments_by_id:\n # The Comment already exists, so simply return it\n return self.submission._comments_by_id[self.parent_id]\n # pylint: enable=no-member\n\n parent = Comment(self._reddit, self.parent_id.split(\"_\", 1)[1])\n parent._submission = self.submission\n return parent\n\n def refresh(self):\n \"\"\"Refresh the comment's attributes.\n\n If using :meth:`.Reddit.comment` this method must be called in order to obtain\n the comment's replies.\n\n Example usage:\n\n .. 
code-block:: python\n\n comment = reddit.comment(\"dkk4qjd\")\n comment.refresh()\n\n \"\"\"\n if \"context\" in self.__dict__: # Using hasattr triggers a fetch\n comment_path = self.context.split(\"?\", 1)[0]\n else:\n path = API_PATH[\"submission\"].format(id=self.submission.id)\n comment_path = f\"{path}_/{self.id}\"\n\n # The context limit appears to be 8, but let's ask for more anyway.\n params = {\"context\": 100}\n if \"reply_limit\" in self.__dict__:\n params[\"limit\"] = self.reply_limit\n if \"reply_sort\" in self.__dict__:\n params[\"sort\"] = self.reply_sort\n comment_list = self._reddit.get(comment_path, params=params)[1].children\n if not comment_list:\n raise ClientException(self.MISSING_COMMENT_MESSAGE)\n\n # With context, the comment may be nested so we have to find it\n comment = None\n queue = comment_list[:]\n while queue and (comment is None or comment.id != self.id):\n comment = queue.pop()\n if isinstance(comment, Comment):\n queue.extend(comment._replies)\n\n if comment.id != self.id:\n raise ClientException(self.MISSING_COMMENT_MESSAGE)\n\n if self._submission is not None:\n del comment.__dict__[\"_submission\"] # Don't replace if set\n self.__dict__.update(comment.__dict__)\n\n for reply in comment_list:\n reply.submission = self.submission\n return self\n\n\nclass CommentModeration(ThingModerationMixin):\n \"\"\"Provide a set of functions pertaining to Comment moderation.\n\n Example usage:\n\n .. code-block:: python\n\n comment = reddit.comment(\"dkk4qjd\")\n comment.mod.approve()\n\n \"\"\"\n\n REMOVAL_MESSAGE_API = \"removal_comment_message\"\n\n def __init__(self, comment: \"praw.models.Comment\"):\n \"\"\"Create a CommentModeration instance.\n\n :param comment: The comment to moderate.\n\n \"\"\"\n self.thing = comment\n\n def show(self):\n \"\"\"Uncollapse a :class:`~.Comment` that has been collapsed by Crowd Control.\n\n Example usage:\n\n .. code-block:: python\n\n # Uncollapse a comment:\n comment = reddit.comment(\"dkk4qjd\")\n comment.mod.show()\n\n \"\"\"\n url = API_PATH[\"show_comment\"]\n\n self.thing._reddit.post(url, data={\"id\": self.thing.fullname})\n", "path": "praw/models/reddit/comment.py" } ]
[ { "content": "\"\"\"Provide the Comment class.\"\"\"\nfrom typing import TYPE_CHECKING, Any, Dict, Optional, Union\n\nfrom ...const import API_PATH\nfrom ...exceptions import ClientException, InvalidURL\nfrom ...util.cache import cachedproperty\nfrom ..comment_forest import CommentForest\nfrom .base import RedditBase\nfrom .mixins import (\n FullnameMixin,\n InboxableMixin,\n ThingModerationMixin,\n UserContentMixin,\n)\nfrom .redditor import Redditor\n\nif TYPE_CHECKING: # pragma: no cover\n import praw\n\n\nclass Comment(InboxableMixin, UserContentMixin, FullnameMixin, RedditBase):\n \"\"\"A class that represents a reddit comment.\n\n **Typical Attributes**\n\n This table describes attributes that typically belong to objects of this class.\n Since attributes are dynamically provided (see\n :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that\n these attributes will always be present, nor is this list necessarily complete.\n\n ================= ==================================================================\n Attribute Description\n ================= ==================================================================\n ``author`` Provides an instance of :class:`.Redditor`.\n ``body`` The body of the comment, as Markdown.\n ``body_html`` The body of the comment, as HTML.\n ``created_utc`` Time the comment was created, represented in `Unix Time`_.\n ``distinguished`` Whether or not the comment is distinguished.\n ``edited`` Whether or not the comment has been edited.\n ``id`` The ID of the comment.\n ``is_submitter`` Whether or not the comment author is also the author of the\n submission.\n ``link_id`` The submission ID that the comment belongs to.\n ``parent_id`` The ID of the parent comment (prefixed with ``t1_``). If it is a\n top-level comment, this returns the submission ID instead\n (prefixed with ``t3_``).\n ``permalink`` A permalink for the comment. Comment objects from the inbox have a\n ``context`` attribute instead.\n ``replies`` Provides an instance of :class:`.CommentForest`.\n ``saved`` Whether or not the comment is saved.\n ``score`` The number of upvotes for the comment.\n ``stickied`` Whether or not the comment is stickied.\n ``submission`` Provides an instance of :class:`.Submission`. The submission that\n the comment belongs to.\n ``subreddit`` Provides an instance of :class:`.Subreddit`. The subreddit that\n the comment belongs to.\n ``subreddit_id`` The subreddit ID that the comment belongs to.\n ================= ==================================================================\n\n .. 
_unix time: https://en.wikipedia.org/wiki/Unix_time\n\n \"\"\"\n\n MISSING_COMMENT_MESSAGE = \"This comment does not appear to be in the comment tree\"\n STR_FIELD = \"id\"\n\n @staticmethod\n def id_from_url(url: str) -> str:\n \"\"\"Get the ID of a comment from the full URL.\"\"\"\n parts = RedditBase._url_parts(url)\n try:\n comment_index = parts.index(\"comments\")\n except ValueError:\n raise InvalidURL(url)\n\n if len(parts) - 4 != comment_index:\n raise InvalidURL(url)\n return parts[-1]\n\n @property\n def _kind(self) -> str:\n \"\"\"Return the class's kind.\"\"\"\n return self._reddit.config.kinds[\"comment\"]\n\n @property\n def is_root(self) -> bool:\n \"\"\"Return True when the comment is a top level comment.\"\"\"\n parent_type = self.parent_id.split(\"_\", 1)[0]\n return parent_type == self._reddit.config.kinds[\"submission\"]\n\n @cachedproperty\n def mod(self) -> \"praw.models.reddit.comment.CommentModeration\":\n \"\"\"Provide an instance of :class:`.CommentModeration`.\n\n Example usage:\n\n .. code-block:: python\n\n comment = reddit.comment(\"dkk4qjd\")\n comment.mod.approve()\n\n \"\"\"\n return CommentModeration(self)\n\n @property\n def replies(self) -> CommentForest:\n \"\"\"Provide an instance of :class:`.CommentForest`.\n\n This property may return an empty list if the comment has not been refreshed\n with :meth:`.refresh()`\n\n Sort order and reply limit can be set with the ``reply_sort`` and\n ``reply_limit`` attributes before replies are fetched, including any call to\n :meth:`.refresh`:\n\n .. code-block:: python\n\n comment.reply_sort = \"new\"\n comment.refresh()\n replies = comment.replies\n\n .. note::\n\n The appropriate values for ``reply_sort`` include ``confidence``,\n ``controversial``, ``new``, ``old``, ``q&a``, and ``top``.\n\n \"\"\"\n if isinstance(self._replies, list):\n self._replies = CommentForest(self.submission, self._replies)\n return self._replies\n\n @property\n def submission(self) -> \"praw.models.Submission\":\n \"\"\"Return the Submission object this comment belongs to.\"\"\"\n if not self._submission: # Comment not from submission\n self._submission = self._reddit.submission(self._extract_submission_id())\n return self._submission\n\n @submission.setter\n def submission(self, submission: \"praw.models.Submission\"):\n \"\"\"Update the Submission associated with the Comment.\"\"\"\n submission._comments_by_id[self.fullname] = self\n self._submission = submission\n # pylint: disable=not-an-iterable\n for reply in getattr(self, \"replies\", []):\n reply.submission = submission\n\n def __init__(\n self,\n reddit: \"praw.Reddit\",\n id: Optional[str] = None, # pylint: disable=redefined-builtin\n url: Optional[str] = None,\n _data: Optional[Dict[str, Any]] = None,\n ):\n \"\"\"Construct an instance of the Comment object.\"\"\"\n if (id, url, _data).count(None) != 2:\n raise TypeError(\"Exactly one of `id`, `url`, or `_data` must be provided.\")\n fetched = False\n self._replies = []\n self._submission = None\n if id:\n self.id = id\n elif url:\n self.id = self.id_from_url(url)\n else:\n fetched = True\n super().__init__(reddit, _data=_data, _fetched=fetched)\n\n def __setattr__(\n self,\n attribute: str,\n value: Union[str, Redditor, CommentForest, \"praw.models.Subreddit\"],\n ):\n \"\"\"Objectify author, replies, and subreddit.\"\"\"\n if attribute == \"author\":\n value = Redditor.from_data(self._reddit, value)\n elif attribute == \"replies\":\n if value == \"\":\n value = []\n else:\n value = 
self._reddit._objector.objectify(value).children\n attribute = \"_replies\"\n elif attribute == \"subreddit\":\n value = self._reddit.subreddit(value)\n super().__setattr__(attribute, value)\n\n def _fetch_info(self):\n return \"info\", {}, {\"id\": self.fullname}\n\n def _fetch_data(self):\n name, fields, params = self._fetch_info()\n path = API_PATH[name].format(**fields)\n return self._reddit.request(\"GET\", path, params)\n\n def _fetch(self):\n data = self._fetch_data()\n data = data[\"data\"]\n\n if not data[\"children\"]:\n raise ClientException(f\"No data returned for comment {self.fullname}\")\n\n comment_data = data[\"children\"][0][\"data\"]\n other = type(self)(self._reddit, _data=comment_data)\n self.__dict__.update(other.__dict__)\n self._fetched = True\n\n def _extract_submission_id(self):\n if \"context\" in self.__dict__:\n return self.context.rsplit(\"/\", 4)[1]\n return self.link_id.split(\"_\", 1)[1]\n\n def parent(self) -> Union[\"Comment\", \"praw.models.Submission\"]:\n \"\"\"Return the parent of the comment.\n\n The returned parent will be an instance of either :class:`.Comment`, or\n :class:`.Submission`.\n\n If this comment was obtained through a :class:`.Submission`, then its entire\n ancestry should be immediately available, requiring no extra network requests.\n However, if this comment was obtained through other means, e.g.,\n ``reddit.comment(\"COMMENT_ID\")``, or ``reddit.inbox.comment_replies``, then the\n returned parent may be a lazy instance of either :class:`.Comment`, or\n :class:`.Submission`.\n\n Lazy comment example:\n\n .. code-block:: python\n\n comment = reddit.comment(\"cklhv0f\")\n parent = comment.parent()\n # `replies` is empty until the comment is refreshed\n print(parent.replies) # Output: []\n parent.refresh()\n print(parent.replies) # Output is at least: [Comment(id=\"cklhv0f\")]\n\n .. warning::\n\n Successive calls to :meth:`.parent()` may result in a network request per\n call when the comment is not obtained through a :class:`.Submission`. See\n below for an example of how to minimize requests.\n\n If you have a deeply nested comment and wish to most efficiently discover its\n top-most :class:`.Comment` ancestor you can chain successive calls to\n :meth:`.parent()` with calls to :meth:`.refresh()` at every 9 levels. For\n example:\n\n .. code-block:: python\n\n comment = reddit.comment(\"dkk4qjd\")\n ancestor = comment\n refresh_counter = 0\n while not ancestor.is_root:\n ancestor = ancestor.parent()\n if refresh_counter % 9 == 0:\n ancestor.refresh()\n refresh_counter += 1\n print(f\"Top-most Ancestor: {ancestor}\")\n\n The above code should result in 5 network requests to Reddit. Without the calls\n to :meth:`.refresh()` it would make at least 31 network requests.\n\n \"\"\"\n # pylint: disable=no-member\n if self.parent_id == self.submission.fullname:\n return self.submission\n\n if self.parent_id in self.submission._comments_by_id:\n # The Comment already exists, so simply return it\n return self.submission._comments_by_id[self.parent_id]\n # pylint: enable=no-member\n\n parent = Comment(self._reddit, self.parent_id.split(\"_\", 1)[1])\n parent._submission = self.submission\n return parent\n\n def refresh(self):\n \"\"\"Refresh the comment's attributes.\n\n If using :meth:`.Reddit.comment` this method must be called in order to obtain\n the comment's replies.\n\n Example usage:\n\n .. 
code-block:: python\n\n comment = reddit.comment(\"dkk4qjd\")\n comment.refresh()\n\n \"\"\"\n if \"context\" in self.__dict__: # Using hasattr triggers a fetch\n comment_path = self.context.split(\"?\", 1)[0]\n else:\n path = API_PATH[\"submission\"].format(id=self.submission.id)\n comment_path = f\"{path}_/{self.id}\"\n\n # The context limit appears to be 8, but let's ask for more anyway.\n params = {\"context\": 100}\n if \"reply_limit\" in self.__dict__:\n params[\"limit\"] = self.reply_limit\n if \"reply_sort\" in self.__dict__:\n params[\"sort\"] = self.reply_sort\n comment_list = self._reddit.get(comment_path, params=params)[1].children\n if not comment_list:\n raise ClientException(self.MISSING_COMMENT_MESSAGE)\n\n # With context, the comment may be nested so we have to find it\n comment = None\n queue = comment_list[:]\n while queue and (comment is None or comment.id != self.id):\n comment = queue.pop()\n if isinstance(comment, Comment):\n queue.extend(comment._replies)\n\n if comment.id != self.id:\n raise ClientException(self.MISSING_COMMENT_MESSAGE)\n\n if self._submission is not None:\n del comment.__dict__[\"_submission\"] # Don't replace if set\n self.__dict__.update(comment.__dict__)\n\n for reply in comment_list:\n reply.submission = self.submission\n return self\n\n\nclass CommentModeration(ThingModerationMixin):\n \"\"\"Provide a set of functions pertaining to Comment moderation.\n\n Example usage:\n\n .. code-block:: python\n\n comment = reddit.comment(\"dkk4qjd\")\n comment.mod.approve()\n\n \"\"\"\n\n REMOVAL_MESSAGE_API = \"removal_comment_message\"\n\n def __init__(self, comment: \"praw.models.Comment\"):\n \"\"\"Create a CommentModeration instance.\n\n :param comment: The comment to moderate.\n\n \"\"\"\n self.thing = comment\n\n def show(self):\n \"\"\"Uncollapse a :class:`~.Comment` that has been collapsed by Crowd Control.\n\n Example usage:\n\n .. code-block:: python\n\n # Uncollapse a comment:\n comment = reddit.comment(\"dkk4qjd\")\n comment.mod.show()\n\n \"\"\"\n url = API_PATH[\"show_comment\"]\n\n self.thing._reddit.post(url, data={\"id\": self.thing.fullname})\n", "path": "praw/models/reddit/comment.py" } ]
diff --git a/AUTHORS.rst b/AUTHORS.rst index 435408c0b..3f5bdbd51 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -26,6 +26,7 @@ Documentation Contributors - Federico Gallo `@Karmavil <https://github.com/Karmavil>`_ - Aidan Welch `@AidanWelch <https://github.com/AidanWelch>`_ - Tom Eagles `@deplorableword <https://github.com/deplorableword>`_ +- Mohammad Ghalayini `@mghalayini <https://github.com/mghalayini>`_ - Add "Name <email (optional)> and github profile link" above this line. Logo Creator diff --git a/praw/models/reddit/comment.py b/praw/models/reddit/comment.py index c8e732581..ff3a51c25 100644 --- a/praw/models/reddit/comment.py +++ b/praw/models/reddit/comment.py @@ -19,7 +19,7 @@ class Comment(InboxableMixin, UserContentMixin, FullnameMixin, RedditBase): - """A class that represents a reddit comments. + """A class that represents a reddit comment. **Typical Attributes**
pydantic__pydantic-2053
underscore_attrs_are_private causes TypeError ### Checks - [x] I added a descriptive title to this issue - [x] I have searched (google, github) for similar issues and couldn't find anything - [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug # Bug Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`: ``` pydantic version: 1.7 pydantic compiled: True install path: /Users/ahedges/.pyenv/versions/3.7.8/envs/sdf/lib/python3.7/site-packages/pydantic python version: 3.7.8 (default, Sep 16 2020, 18:33:23) [Clang 11.0.3 (clang-1103.0.32.59)] platform: Darwin-19.6.0-x86_64-i386-64bit optional deps. installed: ['typing-extensions'] ``` I spent a decent amount of time this weekend trying to make a private field using code posted in #655. I was happy to see Pydantic 1.7 came out today and had support for private fields built in. I upgraded and tried to convert my code, but I encountered some unusual problems. (Even though it doesn't work perfectly, I still appreciate the feature.) Most are type errors from mypy (might report later), but this one is more serious. I simplified the problem below. The issue is that `underscore_attrs_are_private` causes an exception where `PrivateAttr` does not. When using `underscore_attrs_are_private` with the following code: ```python from typing import Any from pydantic import BaseModel class TestObject(BaseModel): public_field: str _private_field: str class Config: underscore_attrs_are_private = True def __init__(self, **data: Any) -> None: super().__init__(**data) self._private_field = "bar" print(TestObject(public_field="foo")) ``` I get the following output: ``` test.py:4: DeprecationWarning: __class__ not set defining 'TestObject' as <class '__main__.TestObject'>. Was __classcell__ propagated to type.__new__? class TestObject(BaseModel): Traceback (most recent call last): File "test.py", line 15, in <module> print(TestObject(public_field="foo")) File "test.py", line 12, in __init__ super().__init__(**data) File "pydantic/main.py", line 365, in pydantic.main.BaseModel.__init__ File "pydantic/main.py", line 424, in pydantic.main.BaseModel._init_private_attributes File "pydantic/fields.py", line 821, in pydantic.fields.PrivateAttr.get_default File "pydantic/utils.py", line 624, in pydantic.utils.smart_deepcopy File "/Users/ahedges/.pyenv/versions/3.7.8/lib/python3.7/copy.py", line 169, in deepcopy rv = reductor(4) TypeError: can't pickle cell objects ``` However, when using `PrivateAttr` with the following code: ```python from typing import Any from pydantic import BaseModel, PrivateAttr class TestObject(BaseModel): public_field: str _private_field: str = PrivateAttr() def __init__(self, **data: Any) -> None: super().__init__(**data) self._private_field = "bar" print(TestObject(public_field="foo")) ``` I get the following, desired output: ``` public_field='foo' ``` I also noticed that removing `__init__()` from the first example also prevents the crash. However, it is needed to set the private field. Another thing to note is that in my full code, the exception caused by `underscore_attrs_are_private` appears but the `DeprecationWarning` does not. If you think this matters, I can try to reproduce my code without the warning.
[ { "content": "import warnings\nimport weakref\nfrom collections import OrderedDict, defaultdict, deque\nfrom copy import deepcopy\nfrom itertools import islice\nfrom types import BuiltinFunctionType, CodeType, FunctionType, GeneratorType, LambdaType, ModuleType\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Callable,\n Dict,\n Generator,\n Iterator,\n List,\n Mapping,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n no_type_check,\n)\n\nfrom .typing import NoneType, display_as_type\nfrom .version import version_info\n\nif TYPE_CHECKING:\n from inspect import Signature\n from pathlib import Path\n\n from .dataclasses import Dataclass # noqa: F401\n from .fields import ModelField # noqa: F401\n from .main import BaseConfig, BaseModel # noqa: F401\n from .typing import AbstractSetIntStr, DictIntStrAny, IntStr, MappingIntStrAny, ReprArgs # noqa: F401\n\n__all__ = (\n 'import_string',\n 'sequence_like',\n 'validate_field_name',\n 'lenient_issubclass',\n 'in_ipython',\n 'deep_update',\n 'update_not_none',\n 'almost_equal_floats',\n 'get_model',\n 'to_camel',\n 'is_valid_field',\n 'smart_deepcopy',\n 'PyObjectStr',\n 'Representation',\n 'GetterDict',\n 'ValueItems',\n 'version_info', # required here to match behaviour in v1.3\n 'ClassAttribute',\n 'path_type',\n 'ROOT_KEY',\n)\n\nROOT_KEY = '__root__'\n# these are types that are returned unchanged by deepcopy\nIMMUTABLE_NON_COLLECTIONS_TYPES: Set[Type[Any]] = {\n int,\n float,\n complex,\n str,\n bool,\n bytes,\n type,\n NoneType,\n FunctionType,\n BuiltinFunctionType,\n LambdaType,\n weakref.ref,\n CodeType,\n # note: including ModuleType will differ from behaviour of deepcopy by not producing error.\n # It might be not a good idea in general, but considering that this function used only internally\n # against default values of fields, this will allow to actually have a field with module as default value\n ModuleType,\n NotImplemented.__class__,\n Ellipsis.__class__,\n}\n\n# these are types that if empty, might be copied with simple copy() instead of deepcopy()\nBUILTIN_COLLECTIONS: Set[Type[Any]] = {\n list,\n set,\n tuple,\n frozenset,\n dict,\n OrderedDict,\n defaultdict,\n deque,\n}\n\n\ndef import_string(dotted_path: str) -> Any:\n \"\"\"\n Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the\n last name in the path. 
Raise ImportError if the import fails.\n \"\"\"\n from importlib import import_module\n\n try:\n module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)\n except ValueError as e:\n raise ImportError(f'\"{dotted_path}\" doesn\\'t look like a module path') from e\n\n module = import_module(module_path)\n try:\n return getattr(module, class_name)\n except AttributeError as e:\n raise ImportError(f'Module \"{module_path}\" does not define a \"{class_name}\" attribute') from e\n\n\ndef truncate(v: Union[str], *, max_len: int = 80) -> str:\n \"\"\"\n Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long\n \"\"\"\n warnings.warn('`truncate` is no-longer used by pydantic and is deprecated', DeprecationWarning)\n if isinstance(v, str) and len(v) > (max_len - 2):\n # -3 so quote + string + … + quote has correct length\n return (v[: (max_len - 3)] + '…').__repr__()\n try:\n v = v.__repr__()\n except TypeError:\n v = v.__class__.__repr__(v) # in case v is a type\n if len(v) > max_len:\n v = v[: max_len - 1] + '…'\n return v\n\n\ndef sequence_like(v: Type[Any]) -> bool:\n return isinstance(v, (list, tuple, set, frozenset, GeneratorType, deque))\n\n\ndef validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:\n \"\"\"\n Ensure that the field's name does not shadow an existing attribute of the model.\n \"\"\"\n for base in bases:\n if getattr(base, field_name, None):\n raise NameError(\n f'Field name \"{field_name}\" shadows a BaseModel attribute; '\n f'use a different field name with \"alias=\\'{field_name}\\'\".'\n )\n\n\ndef lenient_issubclass(cls: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...]]) -> bool:\n return isinstance(cls, type) and issubclass(cls, class_or_tuple)\n\n\ndef in_ipython() -> bool:\n \"\"\"\n Check whether we're in an ipython environment, including jupyter notebooks.\n \"\"\"\n try:\n eval('__IPYTHON__')\n except NameError:\n return False\n else: # pragma: no cover\n return True\n\n\nKeyType = TypeVar('KeyType')\n\n\ndef deep_update(mapping: Dict[KeyType, Any], *updating_mappings: Dict[KeyType, Any]) -> Dict[KeyType, Any]:\n updated_mapping = mapping.copy()\n for updating_mapping in updating_mappings:\n for k, v in updating_mapping.items():\n if k in updated_mapping and isinstance(updated_mapping[k], dict) and isinstance(v, dict):\n updated_mapping[k] = deep_update(updated_mapping[k], v)\n else:\n updated_mapping[k] = v\n return updated_mapping\n\n\ndef update_not_none(mapping: Dict[Any, Any], **update: Any) -> None:\n mapping.update({k: v for k, v in update.items() if v is not None})\n\n\ndef almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:\n \"\"\"\n Return True if two floats are almost equal\n \"\"\"\n return abs(value_1 - value_2) <= delta\n\n\ndef generate_model_signature(\n init: Callable[..., None], fields: Dict[str, 'ModelField'], config: Type['BaseConfig']\n) -> 'Signature':\n \"\"\"\n Generate signature for model based on its fields\n \"\"\"\n from inspect import Parameter, Signature, signature\n\n present_params = signature(init).parameters.values()\n merged_params: Dict[str, Parameter] = {}\n var_kw = None\n use_var_kw = False\n\n for param in islice(present_params, 1, None): # skip self arg\n if param.kind is param.VAR_KEYWORD:\n var_kw = param\n continue\n merged_params[param.name] = param\n\n if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through\n allow_names = config.allow_population_by_field_name\n for 
field_name, field in fields.items():\n param_name = field.alias\n if field_name in merged_params or param_name in merged_params:\n continue\n elif not param_name.isidentifier():\n if allow_names and field_name.isidentifier():\n param_name = field_name\n else:\n use_var_kw = True\n continue\n\n # TODO: replace annotation with actual expected types once #1055 solved\n kwargs = {'default': field.default} if not field.required else {}\n merged_params[param_name] = Parameter(\n param_name, Parameter.KEYWORD_ONLY, annotation=field.outer_type_, **kwargs\n )\n\n if config.extra is config.extra.allow:\n use_var_kw = True\n\n if var_kw and use_var_kw:\n # Make sure the parameter for extra kwargs\n # does not have the same name as a field\n default_model_signature = [\n ('__pydantic_self__', Parameter.POSITIONAL_OR_KEYWORD),\n ('data', Parameter.VAR_KEYWORD),\n ]\n if [(p.name, p.kind) for p in present_params] == default_model_signature:\n # if this is the standard model signature, use extra_data as the extra args name\n var_kw_name = 'extra_data'\n else:\n # else start from var_kw\n var_kw_name = var_kw.name\n\n # generate a name that's definitely unique\n while var_kw_name in fields:\n var_kw_name += '_'\n merged_params[var_kw_name] = var_kw.replace(name=var_kw_name)\n\n return Signature(parameters=list(merged_params.values()), return_annotation=None)\n\n\ndef get_model(obj: Union[Type['BaseModel'], Type['Dataclass']]) -> Type['BaseModel']:\n from .main import BaseModel # noqa: F811\n\n try:\n model_cls = obj.__pydantic_model__ # type: ignore\n except AttributeError:\n model_cls = obj\n\n if not issubclass(model_cls, BaseModel):\n raise TypeError('Unsupported type, must be either BaseModel or dataclass')\n return model_cls\n\n\ndef to_camel(string: str) -> str:\n return ''.join(word.capitalize() for word in string.split('_'))\n\n\nT = TypeVar('T')\n\n\ndef unique_list(input_list: Union[List[T], Tuple[T, ...]]) -> List[T]:\n \"\"\"\n Make a list unique while maintaining order.\n \"\"\"\n result = []\n unique_set = set()\n for v in input_list:\n if v not in unique_set:\n unique_set.add(v)\n result.append(v)\n\n return result\n\n\ndef update_normalized_all(\n item: Union['AbstractSetIntStr', 'MappingIntStrAny'],\n all_items: Union['AbstractSetIntStr', 'MappingIntStrAny'],\n) -> Union['AbstractSetIntStr', 'MappingIntStrAny']:\n \"\"\"\n Update item based on what all items contains.\n\n The update is done based on these cases:\n\n - if both arguments are dicts then each key-value pair existing in ``all_items`` is merged into ``item``,\n while the rest of the key-value pairs are updated recursively with this function.\n - if both arguments are sets then they are just merged.\n - if ``item`` is a dictionary and ``all_items`` is a set then all values of it are added to ``item`` as\n ``key: ...``.\n - if ``item`` is set and ``all_items`` is a dictionary, then ``item`` is converted to a dictionary and then the\n key-value pairs of ``all_items`` are merged in it.\n\n During recursive calls, there is a case where ``all_items`` can be an Ellipsis, in which case the ``item`` is\n returned as is.\n \"\"\"\n if not item:\n return all_items\n if isinstance(item, dict) and isinstance(all_items, dict):\n item = dict(item)\n item.update({k: update_normalized_all(item[k], v) for k, v in all_items.items() if k in item})\n item.update({k: v for k, v in all_items.items() if k not in item})\n return item\n if isinstance(item, set) and isinstance(all_items, set):\n item = set(item)\n item.update(all_items)\n return item\n 
if isinstance(item, dict) and isinstance(all_items, set):\n item = dict(item)\n item.update({k: ... for k in all_items if k not in item})\n return item\n if isinstance(item, set) and isinstance(all_items, dict):\n item = {k: ... for k in item}\n item.update({k: v for k, v in all_items.items() if k not in item})\n return item\n # Case when item or all_items is ... (in recursive calls).\n return item\n\n\nclass PyObjectStr(str):\n \"\"\"\n String class where repr doesn't include quotes. Useful with Representation when you want to return a string\n representation of something that valid (or pseudo-valid) python.\n \"\"\"\n\n def __repr__(self) -> str:\n return str(self)\n\n\nclass Representation:\n \"\"\"\n Mixin to provide __str__, __repr__, and __pretty__ methods. See #884 for more details.\n\n __pretty__ is used by [devtools](https://python-devtools.helpmanual.io/) to provide human readable representations\n of objects.\n \"\"\"\n\n __slots__: Tuple[str, ...] = tuple()\n\n def __repr_args__(self) -> 'ReprArgs':\n \"\"\"\n Returns the attributes to show in __str__, __repr__, and __pretty__ this is generally overridden.\n\n Can either return:\n * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`\n * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`\n \"\"\"\n attrs = ((s, getattr(self, s)) for s in self.__slots__)\n return [(a, v) for a, v in attrs if v is not None]\n\n def __repr_name__(self) -> str:\n \"\"\"\n Name of the instance's class, used in __repr__.\n \"\"\"\n return self.__class__.__name__\n\n def __repr_str__(self, join_str: str) -> str:\n return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())\n\n def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, None, None]:\n \"\"\"\n Used by devtools (https://python-devtools.helpmanual.io/) to provide a human readable representations of objects\n \"\"\"\n yield self.__repr_name__() + '('\n yield 1\n for name, value in self.__repr_args__():\n if name is not None:\n yield name + '='\n yield fmt(value)\n yield ','\n yield 0\n yield -1\n yield ')'\n\n def __str__(self) -> str:\n return self.__repr_str__(' ')\n\n def __repr__(self) -> str:\n return f'{self.__repr_name__()}({self.__repr_str__(\", \")})'\n\n\nclass GetterDict(Representation):\n \"\"\"\n Hack to make object's smell just enough like dicts for validate_model.\n\n We can't inherit from Mapping[str, Any] because it upsets cython so we have to implement all methods ourselves.\n \"\"\"\n\n __slots__ = ('_obj',)\n\n def __init__(self, obj: Any):\n self._obj = obj\n\n def __getitem__(self, key: str) -> Any:\n try:\n return getattr(self._obj, key)\n except AttributeError as e:\n raise KeyError(key) from e\n\n def get(self, key: Any, default: Any = None) -> Any:\n return getattr(self._obj, key, default)\n\n def extra_keys(self) -> Set[Any]:\n \"\"\"\n We don't want to get any other attributes of obj if the model didn't explicitly ask for them\n \"\"\"\n return set()\n\n def keys(self) -> List[Any]:\n \"\"\"\n Keys of the pseudo dictionary, uses a list not set so order information can be maintained like python\n dictionaries.\n \"\"\"\n return list(self)\n\n def values(self) -> List[Any]:\n return [self[k] for k in self]\n\n def items(self) -> Iterator[Tuple[str, Any]]:\n for k in self:\n yield k, self.get(k)\n\n def __iter__(self) -> Iterator[str]:\n for name in dir(self._obj):\n if not name.startswith('_'):\n yield name\n\n def __len__(self) -> int:\n return sum(1 for _ in 
self)\n\n def __contains__(self, item: Any) -> bool:\n return item in self.keys()\n\n def __eq__(self, other: Any) -> bool:\n return dict(self) == dict(other.items())\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, dict(self))]\n\n def __repr_name__(self) -> str:\n return f'GetterDict[{display_as_type(self._obj)}]'\n\n\nclass ValueItems(Representation):\n \"\"\"\n Class for more convenient calculation of excluded or included fields on values.\n \"\"\"\n\n __slots__ = ('_items', '_type')\n\n def __init__(self, value: Any, items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> None:\n if TYPE_CHECKING:\n self._items: Union['AbstractSetIntStr', 'MappingIntStrAny']\n self._type: Type[Union[set, dict]] # type: ignore\n\n # For further type checks speed-up\n if isinstance(items, Mapping):\n self._type = dict\n elif isinstance(items, AbstractSet):\n self._type = set\n else:\n raise TypeError(f'Unexpected type of exclude value {items.__class__}')\n\n if isinstance(value, (list, tuple)):\n items = self._normalize_indexes(items, len(value))\n\n self._items = items\n\n @no_type_check\n def is_excluded(self, item: Any) -> bool:\n \"\"\"\n Check if item is fully excluded\n (value considered excluded if self._type is set and item contained in self._items\n or self._type is dict and self._items.get(item) is ...\n\n :param item: key or index of a value\n \"\"\"\n if self._type is set:\n return item in self._items\n return self._items.get(item) is ...\n\n @no_type_check\n def is_included(self, item: Any) -> bool:\n \"\"\"\n Check if value is contained in self._items\n\n :param item: key or index of value\n \"\"\"\n return item in self._items\n\n @no_type_check\n def for_element(self, e: 'IntStr') -> Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']]:\n \"\"\"\n :param e: key or index of element on value\n :return: raw values for elemet if self._items is dict and contain needed element\n \"\"\"\n\n if self._type is dict:\n item = self._items.get(e)\n return item if item is not ... 
else None\n return None\n\n @no_type_check\n def _normalize_indexes(\n self, items: Union['AbstractSetIntStr', 'MappingIntStrAny'], v_length: int\n ) -> Union['AbstractSetIntStr', 'DictIntStrAny']:\n \"\"\"\n :param items: dict or set of indexes which will be normalized\n :param v_length: length of sequence indexes of which will be\n\n >>> self._normalize_indexes({0, -2, -1}, 4)\n {0, 2, 3}\n >>> self._normalize_indexes({'__all__'}, 4)\n {0, 1, 2, 3}\n \"\"\"\n if any(not isinstance(i, int) and i != '__all__' for i in items):\n raise TypeError(\n 'Excluding fields from a sequence of sub-models or dicts must be performed index-wise: '\n 'expected integer keys or keyword \"__all__\"'\n )\n if self._type is set:\n if '__all__' in items:\n if items != {'__all__'}:\n raise ValueError('set with keyword \"__all__\" must not contain other elements')\n return {i for i in range(v_length)}\n return {v_length + i if i < 0 else i for i in items}\n else:\n all_items = items.get('__all__')\n for i, v in items.items():\n if not (isinstance(v, Mapping) or isinstance(v, AbstractSet) or v is ...):\n raise TypeError(f'Unexpected type of exclude value for index \"{i}\" {v.__class__}')\n normalized_items = {v_length + i if i < 0 else i: v for i, v in items.items() if i != '__all__'}\n if all_items:\n default: Type[Union[Set[Any], Dict[Any, Any]]]\n if isinstance(all_items, Mapping):\n default = dict\n elif isinstance(all_items, AbstractSet):\n default = set\n else:\n for i in range(v_length):\n normalized_items.setdefault(i, ...)\n return normalized_items\n for i in range(v_length):\n normalized_item = normalized_items.setdefault(i, default())\n if normalized_item is not ...:\n normalized_items[i] = update_normalized_all(normalized_item, all_items)\n return normalized_items\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, self._items)]\n\n\nclass ClassAttribute:\n \"\"\"\n Hide class attribute from its instances\n \"\"\"\n\n __slots__ = (\n 'name',\n 'value',\n )\n\n def __init__(self, name: str, value: Any) -> None:\n self.name = name\n self.value = value\n\n def __get__(self, instance: Any, owner: Type[Any]) -> None:\n if instance is None:\n return self.value\n raise AttributeError(f'{self.name!r} attribute of {owner.__name__!r} is class-only')\n\n\npath_types = {\n 'is_dir': 'directory',\n 'is_file': 'file',\n 'is_mount': 'mount point',\n 'is_symlink': 'symlink',\n 'is_block_device': 'block device',\n 'is_char_device': 'char device',\n 'is_fifo': 'FIFO',\n 'is_socket': 'socket',\n}\n\n\ndef path_type(p: 'Path') -> str:\n \"\"\"\n Find out what sort of thing a path is.\n \"\"\"\n assert p.exists(), 'path does not exist'\n for method, name in path_types.items():\n if getattr(p, method)():\n return name\n\n return 'unknown'\n\n\nObj = TypeVar('Obj')\n\n\ndef smart_deepcopy(obj: Obj) -> Obj:\n \"\"\"\n Return type as is for immutable built-in types\n Use obj.copy() for built-in empty collections\n Use copy.deepcopy() for non-empty collections and unknown objects\n \"\"\"\n\n obj_type = obj.__class__\n if obj_type in IMMUTABLE_NON_COLLECTIONS_TYPES:\n return obj # fastest case: obj is immutable and not collection therefore will not be copied anyway\n elif not obj and obj_type in BUILTIN_COLLECTIONS:\n # faster way for empty collections, no need to copy its members\n return obj if obj_type is tuple else obj.copy() # type: ignore # tuple doesn't have copy method\n return deepcopy(obj) # slowest way when we actually might need a deepcopy\n\n\ndef is_valid_field(name: str) -> bool:\n if not 
name.startswith('_'):\n return True\n return ROOT_KEY == name\n\n\ndef is_valid_private_name(name: str) -> bool:\n return not is_valid_field(name) and name not in {'__annotations__', '__module__', '__annotations__', '__qualname__'}\n", "path": "pydantic/utils.py" } ]
[ { "content": "import warnings\nimport weakref\nfrom collections import OrderedDict, defaultdict, deque\nfrom copy import deepcopy\nfrom itertools import islice\nfrom types import BuiltinFunctionType, CodeType, FunctionType, GeneratorType, LambdaType, ModuleType\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Callable,\n Dict,\n Generator,\n Iterator,\n List,\n Mapping,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n no_type_check,\n)\n\nfrom .typing import NoneType, display_as_type\nfrom .version import version_info\n\nif TYPE_CHECKING:\n from inspect import Signature\n from pathlib import Path\n\n from .dataclasses import Dataclass # noqa: F401\n from .fields import ModelField # noqa: F401\n from .main import BaseConfig, BaseModel # noqa: F401\n from .typing import AbstractSetIntStr, DictIntStrAny, IntStr, MappingIntStrAny, ReprArgs # noqa: F401\n\n__all__ = (\n 'import_string',\n 'sequence_like',\n 'validate_field_name',\n 'lenient_issubclass',\n 'in_ipython',\n 'deep_update',\n 'update_not_none',\n 'almost_equal_floats',\n 'get_model',\n 'to_camel',\n 'is_valid_field',\n 'smart_deepcopy',\n 'PyObjectStr',\n 'Representation',\n 'GetterDict',\n 'ValueItems',\n 'version_info', # required here to match behaviour in v1.3\n 'ClassAttribute',\n 'path_type',\n 'ROOT_KEY',\n)\n\nROOT_KEY = '__root__'\n# these are types that are returned unchanged by deepcopy\nIMMUTABLE_NON_COLLECTIONS_TYPES: Set[Type[Any]] = {\n int,\n float,\n complex,\n str,\n bool,\n bytes,\n type,\n NoneType,\n FunctionType,\n BuiltinFunctionType,\n LambdaType,\n weakref.ref,\n CodeType,\n # note: including ModuleType will differ from behaviour of deepcopy by not producing error.\n # It might be not a good idea in general, but considering that this function used only internally\n # against default values of fields, this will allow to actually have a field with module as default value\n ModuleType,\n NotImplemented.__class__,\n Ellipsis.__class__,\n}\n\n# these are types that if empty, might be copied with simple copy() instead of deepcopy()\nBUILTIN_COLLECTIONS: Set[Type[Any]] = {\n list,\n set,\n tuple,\n frozenset,\n dict,\n OrderedDict,\n defaultdict,\n deque,\n}\n\n\ndef import_string(dotted_path: str) -> Any:\n \"\"\"\n Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the\n last name in the path. 
Raise ImportError if the import fails.\n \"\"\"\n from importlib import import_module\n\n try:\n module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)\n except ValueError as e:\n raise ImportError(f'\"{dotted_path}\" doesn\\'t look like a module path') from e\n\n module = import_module(module_path)\n try:\n return getattr(module, class_name)\n except AttributeError as e:\n raise ImportError(f'Module \"{module_path}\" does not define a \"{class_name}\" attribute') from e\n\n\ndef truncate(v: Union[str], *, max_len: int = 80) -> str:\n \"\"\"\n Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long\n \"\"\"\n warnings.warn('`truncate` is no-longer used by pydantic and is deprecated', DeprecationWarning)\n if isinstance(v, str) and len(v) > (max_len - 2):\n # -3 so quote + string + … + quote has correct length\n return (v[: (max_len - 3)] + '…').__repr__()\n try:\n v = v.__repr__()\n except TypeError:\n v = v.__class__.__repr__(v) # in case v is a type\n if len(v) > max_len:\n v = v[: max_len - 1] + '…'\n return v\n\n\ndef sequence_like(v: Type[Any]) -> bool:\n return isinstance(v, (list, tuple, set, frozenset, GeneratorType, deque))\n\n\ndef validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:\n \"\"\"\n Ensure that the field's name does not shadow an existing attribute of the model.\n \"\"\"\n for base in bases:\n if getattr(base, field_name, None):\n raise NameError(\n f'Field name \"{field_name}\" shadows a BaseModel attribute; '\n f'use a different field name with \"alias=\\'{field_name}\\'\".'\n )\n\n\ndef lenient_issubclass(cls: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...]]) -> bool:\n return isinstance(cls, type) and issubclass(cls, class_or_tuple)\n\n\ndef in_ipython() -> bool:\n \"\"\"\n Check whether we're in an ipython environment, including jupyter notebooks.\n \"\"\"\n try:\n eval('__IPYTHON__')\n except NameError:\n return False\n else: # pragma: no cover\n return True\n\n\nKeyType = TypeVar('KeyType')\n\n\ndef deep_update(mapping: Dict[KeyType, Any], *updating_mappings: Dict[KeyType, Any]) -> Dict[KeyType, Any]:\n updated_mapping = mapping.copy()\n for updating_mapping in updating_mappings:\n for k, v in updating_mapping.items():\n if k in updated_mapping and isinstance(updated_mapping[k], dict) and isinstance(v, dict):\n updated_mapping[k] = deep_update(updated_mapping[k], v)\n else:\n updated_mapping[k] = v\n return updated_mapping\n\n\ndef update_not_none(mapping: Dict[Any, Any], **update: Any) -> None:\n mapping.update({k: v for k, v in update.items() if v is not None})\n\n\ndef almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:\n \"\"\"\n Return True if two floats are almost equal\n \"\"\"\n return abs(value_1 - value_2) <= delta\n\n\ndef generate_model_signature(\n init: Callable[..., None], fields: Dict[str, 'ModelField'], config: Type['BaseConfig']\n) -> 'Signature':\n \"\"\"\n Generate signature for model based on its fields\n \"\"\"\n from inspect import Parameter, Signature, signature\n\n present_params = signature(init).parameters.values()\n merged_params: Dict[str, Parameter] = {}\n var_kw = None\n use_var_kw = False\n\n for param in islice(present_params, 1, None): # skip self arg\n if param.kind is param.VAR_KEYWORD:\n var_kw = param\n continue\n merged_params[param.name] = param\n\n if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through\n allow_names = config.allow_population_by_field_name\n for 
field_name, field in fields.items():\n param_name = field.alias\n if field_name in merged_params or param_name in merged_params:\n continue\n elif not param_name.isidentifier():\n if allow_names and field_name.isidentifier():\n param_name = field_name\n else:\n use_var_kw = True\n continue\n\n # TODO: replace annotation with actual expected types once #1055 solved\n kwargs = {'default': field.default} if not field.required else {}\n merged_params[param_name] = Parameter(\n param_name, Parameter.KEYWORD_ONLY, annotation=field.outer_type_, **kwargs\n )\n\n if config.extra is config.extra.allow:\n use_var_kw = True\n\n if var_kw and use_var_kw:\n # Make sure the parameter for extra kwargs\n # does not have the same name as a field\n default_model_signature = [\n ('__pydantic_self__', Parameter.POSITIONAL_OR_KEYWORD),\n ('data', Parameter.VAR_KEYWORD),\n ]\n if [(p.name, p.kind) for p in present_params] == default_model_signature:\n # if this is the standard model signature, use extra_data as the extra args name\n var_kw_name = 'extra_data'\n else:\n # else start from var_kw\n var_kw_name = var_kw.name\n\n # generate a name that's definitely unique\n while var_kw_name in fields:\n var_kw_name += '_'\n merged_params[var_kw_name] = var_kw.replace(name=var_kw_name)\n\n return Signature(parameters=list(merged_params.values()), return_annotation=None)\n\n\ndef get_model(obj: Union[Type['BaseModel'], Type['Dataclass']]) -> Type['BaseModel']:\n from .main import BaseModel # noqa: F811\n\n try:\n model_cls = obj.__pydantic_model__ # type: ignore\n except AttributeError:\n model_cls = obj\n\n if not issubclass(model_cls, BaseModel):\n raise TypeError('Unsupported type, must be either BaseModel or dataclass')\n return model_cls\n\n\ndef to_camel(string: str) -> str:\n return ''.join(word.capitalize() for word in string.split('_'))\n\n\nT = TypeVar('T')\n\n\ndef unique_list(input_list: Union[List[T], Tuple[T, ...]]) -> List[T]:\n \"\"\"\n Make a list unique while maintaining order.\n \"\"\"\n result = []\n unique_set = set()\n for v in input_list:\n if v not in unique_set:\n unique_set.add(v)\n result.append(v)\n\n return result\n\n\ndef update_normalized_all(\n item: Union['AbstractSetIntStr', 'MappingIntStrAny'],\n all_items: Union['AbstractSetIntStr', 'MappingIntStrAny'],\n) -> Union['AbstractSetIntStr', 'MappingIntStrAny']:\n \"\"\"\n Update item based on what all items contains.\n\n The update is done based on these cases:\n\n - if both arguments are dicts then each key-value pair existing in ``all_items`` is merged into ``item``,\n while the rest of the key-value pairs are updated recursively with this function.\n - if both arguments are sets then they are just merged.\n - if ``item`` is a dictionary and ``all_items`` is a set then all values of it are added to ``item`` as\n ``key: ...``.\n - if ``item`` is set and ``all_items`` is a dictionary, then ``item`` is converted to a dictionary and then the\n key-value pairs of ``all_items`` are merged in it.\n\n During recursive calls, there is a case where ``all_items`` can be an Ellipsis, in which case the ``item`` is\n returned as is.\n \"\"\"\n if not item:\n return all_items\n if isinstance(item, dict) and isinstance(all_items, dict):\n item = dict(item)\n item.update({k: update_normalized_all(item[k], v) for k, v in all_items.items() if k in item})\n item.update({k: v for k, v in all_items.items() if k not in item})\n return item\n if isinstance(item, set) and isinstance(all_items, set):\n item = set(item)\n item.update(all_items)\n return item\n 
if isinstance(item, dict) and isinstance(all_items, set):\n item = dict(item)\n item.update({k: ... for k in all_items if k not in item})\n return item\n if isinstance(item, set) and isinstance(all_items, dict):\n item = {k: ... for k in item}\n item.update({k: v for k, v in all_items.items() if k not in item})\n return item\n # Case when item or all_items is ... (in recursive calls).\n return item\n\n\nclass PyObjectStr(str):\n \"\"\"\n String class where repr doesn't include quotes. Useful with Representation when you want to return a string\n representation of something that valid (or pseudo-valid) python.\n \"\"\"\n\n def __repr__(self) -> str:\n return str(self)\n\n\nclass Representation:\n \"\"\"\n Mixin to provide __str__, __repr__, and __pretty__ methods. See #884 for more details.\n\n __pretty__ is used by [devtools](https://python-devtools.helpmanual.io/) to provide human readable representations\n of objects.\n \"\"\"\n\n __slots__: Tuple[str, ...] = tuple()\n\n def __repr_args__(self) -> 'ReprArgs':\n \"\"\"\n Returns the attributes to show in __str__, __repr__, and __pretty__ this is generally overridden.\n\n Can either return:\n * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`\n * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`\n \"\"\"\n attrs = ((s, getattr(self, s)) for s in self.__slots__)\n return [(a, v) for a, v in attrs if v is not None]\n\n def __repr_name__(self) -> str:\n \"\"\"\n Name of the instance's class, used in __repr__.\n \"\"\"\n return self.__class__.__name__\n\n def __repr_str__(self, join_str: str) -> str:\n return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())\n\n def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, None, None]:\n \"\"\"\n Used by devtools (https://python-devtools.helpmanual.io/) to provide a human readable representations of objects\n \"\"\"\n yield self.__repr_name__() + '('\n yield 1\n for name, value in self.__repr_args__():\n if name is not None:\n yield name + '='\n yield fmt(value)\n yield ','\n yield 0\n yield -1\n yield ')'\n\n def __str__(self) -> str:\n return self.__repr_str__(' ')\n\n def __repr__(self) -> str:\n return f'{self.__repr_name__()}({self.__repr_str__(\", \")})'\n\n\nclass GetterDict(Representation):\n \"\"\"\n Hack to make object's smell just enough like dicts for validate_model.\n\n We can't inherit from Mapping[str, Any] because it upsets cython so we have to implement all methods ourselves.\n \"\"\"\n\n __slots__ = ('_obj',)\n\n def __init__(self, obj: Any):\n self._obj = obj\n\n def __getitem__(self, key: str) -> Any:\n try:\n return getattr(self._obj, key)\n except AttributeError as e:\n raise KeyError(key) from e\n\n def get(self, key: Any, default: Any = None) -> Any:\n return getattr(self._obj, key, default)\n\n def extra_keys(self) -> Set[Any]:\n \"\"\"\n We don't want to get any other attributes of obj if the model didn't explicitly ask for them\n \"\"\"\n return set()\n\n def keys(self) -> List[Any]:\n \"\"\"\n Keys of the pseudo dictionary, uses a list not set so order information can be maintained like python\n dictionaries.\n \"\"\"\n return list(self)\n\n def values(self) -> List[Any]:\n return [self[k] for k in self]\n\n def items(self) -> Iterator[Tuple[str, Any]]:\n for k in self:\n yield k, self.get(k)\n\n def __iter__(self) -> Iterator[str]:\n for name in dir(self._obj):\n if not name.startswith('_'):\n yield name\n\n def __len__(self) -> int:\n return sum(1 for _ in 
self)\n\n def __contains__(self, item: Any) -> bool:\n return item in self.keys()\n\n def __eq__(self, other: Any) -> bool:\n return dict(self) == dict(other.items())\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, dict(self))]\n\n def __repr_name__(self) -> str:\n return f'GetterDict[{display_as_type(self._obj)}]'\n\n\nclass ValueItems(Representation):\n \"\"\"\n Class for more convenient calculation of excluded or included fields on values.\n \"\"\"\n\n __slots__ = ('_items', '_type')\n\n def __init__(self, value: Any, items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> None:\n if TYPE_CHECKING:\n self._items: Union['AbstractSetIntStr', 'MappingIntStrAny']\n self._type: Type[Union[set, dict]] # type: ignore\n\n # For further type checks speed-up\n if isinstance(items, Mapping):\n self._type = dict\n elif isinstance(items, AbstractSet):\n self._type = set\n else:\n raise TypeError(f'Unexpected type of exclude value {items.__class__}')\n\n if isinstance(value, (list, tuple)):\n items = self._normalize_indexes(items, len(value))\n\n self._items = items\n\n @no_type_check\n def is_excluded(self, item: Any) -> bool:\n \"\"\"\n Check if item is fully excluded\n (value considered excluded if self._type is set and item contained in self._items\n or self._type is dict and self._items.get(item) is ...\n\n :param item: key or index of a value\n \"\"\"\n if self._type is set:\n return item in self._items\n return self._items.get(item) is ...\n\n @no_type_check\n def is_included(self, item: Any) -> bool:\n \"\"\"\n Check if value is contained in self._items\n\n :param item: key or index of value\n \"\"\"\n return item in self._items\n\n @no_type_check\n def for_element(self, e: 'IntStr') -> Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']]:\n \"\"\"\n :param e: key or index of element on value\n :return: raw values for elemet if self._items is dict and contain needed element\n \"\"\"\n\n if self._type is dict:\n item = self._items.get(e)\n return item if item is not ... 
else None\n return None\n\n @no_type_check\n def _normalize_indexes(\n self, items: Union['AbstractSetIntStr', 'MappingIntStrAny'], v_length: int\n ) -> Union['AbstractSetIntStr', 'DictIntStrAny']:\n \"\"\"\n :param items: dict or set of indexes which will be normalized\n :param v_length: length of sequence indexes of which will be\n\n >>> self._normalize_indexes({0, -2, -1}, 4)\n {0, 2, 3}\n >>> self._normalize_indexes({'__all__'}, 4)\n {0, 1, 2, 3}\n \"\"\"\n if any(not isinstance(i, int) and i != '__all__' for i in items):\n raise TypeError(\n 'Excluding fields from a sequence of sub-models or dicts must be performed index-wise: '\n 'expected integer keys or keyword \"__all__\"'\n )\n if self._type is set:\n if '__all__' in items:\n if items != {'__all__'}:\n raise ValueError('set with keyword \"__all__\" must not contain other elements')\n return {i for i in range(v_length)}\n return {v_length + i if i < 0 else i for i in items}\n else:\n all_items = items.get('__all__')\n for i, v in items.items():\n if not (isinstance(v, Mapping) or isinstance(v, AbstractSet) or v is ...):\n raise TypeError(f'Unexpected type of exclude value for index \"{i}\" {v.__class__}')\n normalized_items = {v_length + i if i < 0 else i: v for i, v in items.items() if i != '__all__'}\n if all_items:\n default: Type[Union[Set[Any], Dict[Any, Any]]]\n if isinstance(all_items, Mapping):\n default = dict\n elif isinstance(all_items, AbstractSet):\n default = set\n else:\n for i in range(v_length):\n normalized_items.setdefault(i, ...)\n return normalized_items\n for i in range(v_length):\n normalized_item = normalized_items.setdefault(i, default())\n if normalized_item is not ...:\n normalized_items[i] = update_normalized_all(normalized_item, all_items)\n return normalized_items\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, self._items)]\n\n\nclass ClassAttribute:\n \"\"\"\n Hide class attribute from its instances\n \"\"\"\n\n __slots__ = (\n 'name',\n 'value',\n )\n\n def __init__(self, name: str, value: Any) -> None:\n self.name = name\n self.value = value\n\n def __get__(self, instance: Any, owner: Type[Any]) -> None:\n if instance is None:\n return self.value\n raise AttributeError(f'{self.name!r} attribute of {owner.__name__!r} is class-only')\n\n\npath_types = {\n 'is_dir': 'directory',\n 'is_file': 'file',\n 'is_mount': 'mount point',\n 'is_symlink': 'symlink',\n 'is_block_device': 'block device',\n 'is_char_device': 'char device',\n 'is_fifo': 'FIFO',\n 'is_socket': 'socket',\n}\n\n\ndef path_type(p: 'Path') -> str:\n \"\"\"\n Find out what sort of thing a path is.\n \"\"\"\n assert p.exists(), 'path does not exist'\n for method, name in path_types.items():\n if getattr(p, method)():\n return name\n\n return 'unknown'\n\n\nObj = TypeVar('Obj')\n\n\ndef smart_deepcopy(obj: Obj) -> Obj:\n \"\"\"\n Return type as is for immutable built-in types\n Use obj.copy() for built-in empty collections\n Use copy.deepcopy() for non-empty collections and unknown objects\n \"\"\"\n\n obj_type = obj.__class__\n if obj_type in IMMUTABLE_NON_COLLECTIONS_TYPES:\n return obj # fastest case: obj is immutable and not collection therefore will not be copied anyway\n elif not obj and obj_type in BUILTIN_COLLECTIONS:\n # faster way for empty collections, no need to copy its members\n return obj if obj_type is tuple else obj.copy() # type: ignore # tuple doesn't have copy method\n return deepcopy(obj) # slowest way when we actually might need a deepcopy\n\n\ndef is_valid_field(name: str) -> bool:\n if not 
name.startswith('_'):\n return True\n return ROOT_KEY == name\n\n\ndef is_valid_private_name(name: str) -> bool:\n return not is_valid_field(name) and name not in {'__annotations__', '__classcell__', '__module__', '__qualname__'}\n", "path": "pydantic/utils.py" } ]
diff --git a/Makefile b/Makefile index 8aa686f4b6e..cc5d66bcb57 100644 --- a/Makefile +++ b/Makefile @@ -134,5 +134,5 @@ docs-serve: .PHONY: publish-docs publish-docs: zip -r site.zip site - @curl -f -H "Content-Type: application/zip" -H "Authorization: Bearer ${NETLIFY}" \ + @curl -H "Content-Type: application/zip" -H "Authorization: Bearer ${NETLIFY}" \ --data-binary "@site.zip" https://api.netlify.com/api/v1/sites/pydantic-docs.netlify.com/deploys diff --git a/changes/2047-samuelcolvin.md b/changes/2047-samuelcolvin.md new file mode 100644 index 00000000000..6c4c31cf348 --- /dev/null +++ b/changes/2047-samuelcolvin.md @@ -0,0 +1 @@ +fix `underscore_attrs_are_private` causing `TypeError` when overriding `__init__` diff --git a/pydantic/utils.py b/pydantic/utils.py index 96b792fbe1b..4592223d13c 100644 --- a/pydantic/utils.py +++ b/pydantic/utils.py @@ -631,4 +631,4 @@ def is_valid_field(name: str) -> bool: def is_valid_private_name(name: str) -> bool: - return not is_valid_field(name) and name not in {'__annotations__', '__module__', '__annotations__', '__qualname__'} + return not is_valid_field(name) and name not in {'__annotations__', '__classcell__', '__module__', '__qualname__'} diff --git a/tests/test_private_attributes.py b/tests/test_private_attributes.py index 07f702c7f22..319fdab1462 100644 --- a/tests/test_private_attributes.py +++ b/tests/test_private_attributes.py @@ -160,3 +160,20 @@ def __init__(self): def test_default_and_default_factory_used_error(): with pytest.raises(TypeError, match='default and default_factory args can not be used together'): PrivateAttr(default=123, default_factory=lambda: 321) + + +def test_config_override_init(): + class MyModel(BaseModel): + x: str + _private_attr: int + + def __init__(self, **data) -> None: + super().__init__(**data) + self._private_attr = 123 + + class Config: + underscore_attrs_are_private = True + + m = MyModel(x='hello') + assert m.dict() == {'x': 'hello'} + assert m._private_attr == 123
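The last hunk above adds `__classcell__` to the names that `is_valid_private_name` skips. A minimal standalone sketch of why that name turns up at all (the `ShowNamespace` metaclass below is hypothetical, purely for illustration, and is not pydantic code): whenever a method in a class body calls zero-argument `super()`, the compiler places a `__classcell__` cell in the namespace handed to the metaclass, so logic that treats every remaining underscore-prefixed name as a private attribute, as `underscore_attrs_are_private` does, would otherwise pick it up; that is presumably the `TypeError` the changelog entry and the new test refer to.

```python
# Illustration only: the metaclass below is hypothetical, not pydantic code.
# A class body whose methods call zero-argument super() hands a '__classcell__'
# entry to its metaclass; a plain class body does not.
class ShowNamespace(type):
    def __new__(mcs, name, bases, namespace, **kwargs):
        print(name, [k for k in namespace if k.startswith('__class')])
        return super().__new__(mcs, name, bases, namespace, **kwargs)


class Plain(metaclass=ShowNamespace):
    pass                                    # prints: Plain []


class Overriding(Plain):
    def __init__(self, **data):
        super().__init__(**data)            # zero-arg super() adds __classcell__


# class creation above prints: Overriding ['__classcell__']
```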
Mailu__Mailu-2157
Admin User Quota sorting is off

Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net). To be able to help you best, we need some more information.

## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple, and should contain specific terms related to the actual issue.

## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm

### Versions
1.9

## Description
When sorting by quota in the Admin interface, the values are sorted as text instead of by their numeric byte count.

## Expected behaviour
kB is smaller than MB is smaller than GB

![quota](https://user-images.githubusercontent.com/561565/148793187-c7464ef3-b31e-48d5-966f-07d4be315412.PNG)
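A small self-contained illustration of the reported behaviour (the quota values below are invented for the example, not taken from the screenshot): sorting the human-readable strings lexicographically interleaves kB, MB and GB, while sorting by the underlying byte count gives the order the reporter expects. The fix in the pull request below keeps the formatted string for display but exposes the raw byte count through a `data-sort` attribute so the table is ordered numerically.

```python
# Illustration only: the byte counts are made-up sample values.
quotas_in_bytes = [2_200_000_000, 713_000_000, 50_000]   # 2.2 GB, 713 MB, 50 kB

def human(n: float) -> str:
    """Rough human-readable formatter, similar to what the admin UI displays."""
    for unit in ("B", "kB", "MB", "GB"):
        if n < 1000 or unit == "GB":
            return f"{n:g} {unit}"
        n /= 1000

print(sorted(human(n) for n in quotas_in_bytes))
# ['2.2 GB', '50 kB', '713 MB']   <- text sort: GB ends up before kB
print([human(n) for n in sorted(quotas_in_bytes)])
# ['50 kB', '713 MB', '2.2 GB']   <- numeric sort: the expected order
```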
[ { "content": "\"\"\" Mailu admin app\n\"\"\"\n\nimport flask\nimport flask_bootstrap\n\nfrom mailu import utils, debug, models, manage, configuration\n\nimport hmac\n\ndef create_app_from_config(config):\n \"\"\" Create a new application based on the given configuration\n \"\"\"\n app = flask.Flask(__name__, static_folder='static', static_url_path='/static')\n app.cli.add_command(manage.mailu)\n\n # Bootstrap is used for error display and flash messages\n app.bootstrap = flask_bootstrap.Bootstrap(app)\n\n # Initialize application extensions\n config.init_app(app)\n models.db.init_app(app)\n utils.session.init_app(app)\n utils.limiter.init_app(app)\n utils.babel.init_app(app)\n utils.login.init_app(app)\n utils.login.user_loader(models.User.get)\n utils.proxy.init_app(app)\n utils.migrate.init_app(app, models.db)\n\n app.device_cookie_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('DEVICE_COOKIE_KEY', 'utf-8'), 'sha256').digest()\n app.temp_token_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('WEBMAIL_TEMP_TOKEN_KEY', 'utf-8'), 'sha256').digest()\n app.srs_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('SRS_KEY', 'utf-8'), 'sha256').digest()\n\n # Initialize list of translations\n app.config.translations = {\n str(locale): locale\n for locale in sorted(\n utils.babel.list_translations(),\n key=lambda l: l.get_language_name().title()\n )\n }\n\n # Initialize debugging tools\n if app.config.get(\"DEBUG\"):\n debug.toolbar.init_app(app)\n if app.config.get(\"DEBUG_PROFILER\"):\n debug.profiler.init_app(app)\n if assets := app.config.get('DEBUG_ASSETS'):\n app.static_folder = assets\n\n # Inject the default variables in the Jinja parser\n # TODO: move this to blueprints when needed\n @app.context_processor\n def inject_defaults():\n signup_domains = models.Domain.query.filter_by(signup_enabled=True).all()\n return dict(\n signup_domains= signup_domains,\n config = app.config,\n )\n\n # Jinja filters\n @app.template_filter()\n def format_date(value):\n return utils.flask_babel.format_date(value) if value else ''\n\n @app.template_filter()\n def format_datetime(value):\n return utils.flask_babel.format_datetime(value) if value else ''\n\n # Import views\n from mailu import ui, internal, sso\n app.register_blueprint(ui.ui, url_prefix=app.config['WEB_ADMIN'])\n app.register_blueprint(internal.internal, url_prefix='/internal')\n app.register_blueprint(sso.sso, url_prefix='/sso')\n return app\n\n\ndef create_app():\n \"\"\" Create a new application based on the config module\n \"\"\"\n config = configuration.ConfigManager()\n return create_app_from_config(config)\n\n", "path": "core/admin/mailu/__init__.py" } ]
[ { "content": "\"\"\" Mailu admin app\n\"\"\"\n\nimport flask\nimport flask_bootstrap\n\nfrom mailu import utils, debug, models, manage, configuration\n\nimport hmac\n\ndef create_app_from_config(config):\n \"\"\" Create a new application based on the given configuration\n \"\"\"\n app = flask.Flask(__name__, static_folder='static', static_url_path='/static')\n app.cli.add_command(manage.mailu)\n\n # Bootstrap is used for error display and flash messages\n app.bootstrap = flask_bootstrap.Bootstrap(app)\n\n # Initialize application extensions\n config.init_app(app)\n models.db.init_app(app)\n utils.session.init_app(app)\n utils.limiter.init_app(app)\n utils.babel.init_app(app)\n utils.login.init_app(app)\n utils.login.user_loader(models.User.get)\n utils.proxy.init_app(app)\n utils.migrate.init_app(app, models.db)\n\n app.device_cookie_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('DEVICE_COOKIE_KEY', 'utf-8'), 'sha256').digest()\n app.temp_token_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('WEBMAIL_TEMP_TOKEN_KEY', 'utf-8'), 'sha256').digest()\n app.srs_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('SRS_KEY', 'utf-8'), 'sha256').digest()\n\n # Initialize list of translations\n app.config.translations = {\n str(locale): locale\n for locale in sorted(\n utils.babel.list_translations(),\n key=lambda l: l.get_language_name().title()\n )\n }\n\n # Initialize debugging tools\n if app.config.get(\"DEBUG\"):\n debug.toolbar.init_app(app)\n if app.config.get(\"DEBUG_PROFILER\"):\n debug.profiler.init_app(app)\n if assets := app.config.get('DEBUG_ASSETS'):\n app.static_folder = assets\n\n # Inject the default variables in the Jinja parser\n # TODO: move this to blueprints when needed\n @app.context_processor\n def inject_defaults():\n signup_domains = models.Domain.query.filter_by(signup_enabled=True).all()\n return dict(\n signup_domains= signup_domains,\n config = app.config,\n get_locale = utils.get_locale,\n )\n\n # Jinja filters\n @app.template_filter()\n def format_date(value):\n return utils.flask_babel.format_date(value) if value else ''\n\n @app.template_filter()\n def format_datetime(value):\n return utils.flask_babel.format_datetime(value) if value else ''\n\n # Import views\n from mailu import ui, internal, sso\n app.register_blueprint(ui.ui, url_prefix=app.config['WEB_ADMIN'])\n app.register_blueprint(internal.internal, url_prefix='/internal')\n app.register_blueprint(sso.sso, url_prefix='/sso')\n return app\n\n\ndef create_app():\n \"\"\" Create a new application based on the config module\n \"\"\"\n config = configuration.ConfigManager()\n return create_app_from_config(config)\n\n", "path": "core/admin/mailu/__init__.py" } ]
diff --git a/core/admin/mailu/__init__.py b/core/admin/mailu/__init__.py index 5bb404473..3b88024f2 100644 --- a/core/admin/mailu/__init__.py +++ b/core/admin/mailu/__init__.py @@ -57,6 +57,7 @@ def inject_defaults(): return dict( signup_domains= signup_domains, config = app.config, + get_locale = utils.get_locale, ) # Jinja filters diff --git a/core/admin/mailu/sso/templates/base_sso.html b/core/admin/mailu/sso/templates/base_sso.html index 9dfb25a52..6d196d6ac 100644 --- a/core/admin/mailu/sso/templates/base_sso.html +++ b/core/admin/mailu/sso/templates/base_sso.html @@ -1,7 +1,7 @@ {%- import "macros.html" as macros %} {%- import "bootstrap/utils.html" as utils %} <!doctype html> -<html lang="{{ session['language'] }}" data-static="/static/"> +<html lang="{{ get_locale() }}" data-static="/static/"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> @@ -34,8 +34,8 @@ <ul class="navbar-nav ml-auto"> <li class="nav-item dropdown"> <a class="nav-link" data-toggle="dropdown" href="#" aria-expanded="false"> - <i class="fas fa-language text-xl" aria-hidden="true" title="{% trans %}change language{% endtrans %}"></i><span class="sr-only">Language</span> - <span class="badge badge-primary navbar-badge">{{ session['language'] }}</span></a> + <i class="fas fa-language text-xl" aria-hidden="true" title="{% trans %}change language{% endtrans %}"></i><span class="sr-only">{% trans %}change language{% endtrans %}</span> + <span class="badge badge-primary navbar-badge">{{ get_locale() }}</span></a> <div class="dropdown-menu dropdown-menu-right p-0" id="mailu-languages"> {%- for locale in config.translations.values() %} <a class="dropdown-item{% if locale|string() == session['language'] %} active{% endif %}" href="{{ url_for('sso.set_language', language=locale) }}">{{ locale.get_language_name().title() }}</a> diff --git a/core/admin/mailu/sso/templates/sidebar_sso.html b/core/admin/mailu/sso/templates/sidebar_sso.html index 86db33330..0f75a1ded 100644 --- a/core/admin/mailu/sso/templates/sidebar_sso.html +++ b/core/admin/mailu/sso/templates/sidebar_sso.html @@ -36,6 +36,12 @@ </a> </li> {%- endif %} + <li class="nav-item" role="none"> + <a href="{{ url_for('sso.login') }}" class="nav-link" role="menuitem"> + <i class="nav-icon fas fa-sign-in-alt"></i> + <p>{% trans %}Sign in{% endtrans %}</p> + </a> + </li> {#- User self-registration is only available when - Admin is available diff --git a/core/admin/mailu/ui/templates/admin/list.html b/core/admin/mailu/ui/templates/admin/list.html index 84d954a0f..e50c0ee62 100644 --- a/core/admin/mailu/ui/templates/admin/list.html +++ b/core/admin/mailu/ui/templates/admin/list.html @@ -14,7 +14,7 @@ {%- call macros.table() %} <thead> <tr> - <th>{% trans %}Actions{% endtrans %}</th> + <th data-orderable="false">{% trans %}Actions{% endtrans %}</th> <th>{% trans %}Email{% endtrans %}</th> </tr> </thead> diff --git a/core/admin/mailu/ui/templates/alias/list.html b/core/admin/mailu/ui/templates/alias/list.html index 1e66668e9..833e44c18 100644 --- a/core/admin/mailu/ui/templates/alias/list.html +++ b/core/admin/mailu/ui/templates/alias/list.html @@ -16,7 +16,7 @@ {%- call macros.table() %} <thead> <tr> - <th>{% trans %}Actions{% endtrans %}</th> + <th data-orderable="false">{% trans %}Actions{% endtrans %}</th> <th>{% trans %}Email{% endtrans %}</th> <th>{% trans %}Destination{% endtrans %}</th> <th>{% trans %}Comment{% endtrans %}</th> @@ -34,8 +34,8 @@ <td>{{ alias }}</td> <td>{{ alias.destination|join(', ') or '-' }}</td> 
<td>{{ alias.comment or '' }}</td> - <td>{{ alias.created_at | format_date }}</td> - <td>{{ alias.updated_at | format_date }}</td> + <td data-sort="{{ alias.created_at or '0000-00-00' }}">{{ alias.created_at | format_date }}</td> + <td data-sort="{{ alias.updated_at or '0000-00-00' }}">{{ alias.updated_at | format_date }}</td> </tr> {%- endfor %} </tbody> diff --git a/core/admin/mailu/ui/templates/alternative/list.html b/core/admin/mailu/ui/templates/alternative/list.html index 4ca9f3c84..97482ac3f 100644 --- a/core/admin/mailu/ui/templates/alternative/list.html +++ b/core/admin/mailu/ui/templates/alternative/list.html @@ -16,7 +16,7 @@ {%- call macros.table() %} <thead> <tr> - <th>{% trans %}Actions{% endtrans %}</th> + <th data-orderable="false">{% trans %}Actions{% endtrans %}</th> <th>{% trans %}Name{% endtrans %}</th> <th>{% trans %}Created{% endtrans %}</th> <th>{% trans %}Last edit{% endtrans %}</th> @@ -29,8 +29,8 @@ <a href="{{ url_for('.alternative_delete', alternative=alternative.name) }}" title="{% trans %}Delete{% endtrans %}"><i class="fa fa-trash"></i></a> </td> <td>{{ alternative }}</td> - <td>{{ alternative.created_at | format_date }}</td> - <td>{{ alternative.updated_at | format_date }}</td> + <td data-sort="{{ alternative.created_at or '0000-00-00' }}">{{ alternative.created_at | format_date }}</td> + <td data-sort="{{ alternative.updated_at or '0000-00-00' }}">{{ alternative.updated_at | format_date }}</td> </tr> {%- endfor %} </tbody> diff --git a/core/admin/mailu/ui/templates/base.html b/core/admin/mailu/ui/templates/base.html index e646e579b..2ab21492b 100644 --- a/core/admin/mailu/ui/templates/base.html +++ b/core/admin/mailu/ui/templates/base.html @@ -1,7 +1,7 @@ {%- import "macros.html" as macros %} {%- import "bootstrap/utils.html" as utils %} <!doctype html> -<html lang="{{ session['language'] }}" data-static="/static/"> +<html lang="{{ get_locale() }}" data-static="/static/"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> @@ -34,8 +34,8 @@ <ul class="navbar-nav ml-auto"> <li class="nav-item dropdown"> <a class="nav-link" data-toggle="dropdown" href="#" aria-expanded="false"> - <i class="fas fa-language text-xl" aria-hidden="true" title="{% trans %}change language{% endtrans %}"></i><span class="sr-only">Language</span> - <span class="badge badge-primary navbar-badge">{{ session['language'] }}</span></a> + <i class="fas fa-language text-xl" aria-hidden="true" title="{% trans %}change language{% endtrans %}"></i><span class="sr-only">{% trans %}change language{% endtrans %}</span> + <span class="badge badge-primary navbar-badge">{{ get_locale() }}</span></a> <div class="dropdown-menu dropdown-menu-right p-0" id="mailu-languages"> {%- for locale in config.translations.values() %} <a class="dropdown-item{% if locale|string() == session['language'] %} active{% endif %}" href="{{ url_for('.set_language', language=locale) }}">{{ locale.get_language_name().title() }}</a> diff --git a/core/admin/mailu/ui/templates/client.html b/core/admin/mailu/ui/templates/client.html index bf0ba64e8..fddbe0d2d 100644 --- a/core/admin/mailu/ui/templates/client.html +++ b/core/admin/mailu/ui/templates/client.html @@ -9,7 +9,6 @@ {%- endblock %} {%- block content %} -<div>If you use an Apple device, <a href="/apple.mobileconfig">click here to autoconfigure it.</a></div> {%- call macros.table(title=_("Incoming mail"), datatable=False) %} <tbody> <tr> @@ -59,4 +58,8 @@ </tr> </tbody> {%- endcall %} +<blockquote> + {% trans %}If you use an 
Apple device,{% endtrans %} + <a href="/apple.mobileconfig">{% trans %}click here to autoconfigure it.{% endtrans %}</a> +</blockquote> {%- endblock %} diff --git a/core/admin/mailu/ui/templates/domain/list.html b/core/admin/mailu/ui/templates/domain/list.html index 61c09151f..4889bc8d0 100644 --- a/core/admin/mailu/ui/templates/domain/list.html +++ b/core/admin/mailu/ui/templates/domain/list.html @@ -11,15 +11,16 @@ {%- endblock %} {%- block content %} -{%- call macros.table() %} +{%- call macros.table(order='[[2,"asc"]]') %} <thead> <tr> - <th>{% trans %}Actions{% endtrans %}</th> - <th>{% trans %}Manage{% endtrans %}</th> + <th data-orderable="false">{% trans %}Actions{% endtrans %}</th> + <th data-orderable="false">{% trans %}Manage{% endtrans %}</th> <th>{% trans %}Domain name{% endtrans %}</th> <th>{% trans %}Mailbox count{% endtrans %}</th> <th>{% trans %}Alias count{% endtrans %}</th> <th>{% trans %}Comment{% endtrans %}</th> + <th>{% trans %}Enable sign-up{% endtrans %}</th> <th>{% trans %}Created{% endtrans %}</th> <th>{% trans %}Last edit{% endtrans %}</th> </tr> @@ -43,11 +44,12 @@ {%- endif %} </td> <td>{{ domain.name }}</td> - <td>{{ domain.users | count }} / {{ '∞' if domain.max_users == -1 else domain.max_users }}</td> - <td>{{ domain.aliases | count }} / {{ '∞' if domain.max_aliases == -1 else domain.max_aliases }}</td> + <td data-order="{{ domain.users | count }}">{{ domain.users | count }} / {{ '∞' if domain.max_users == -1 else domain.max_users }}</td> + <td data-order="{{ domain.aliases | count }}">{{ domain.aliases | count }} / {{ '∞' if domain.max_aliases == -1 else domain.max_aliases }}</td> <td>{{ domain.comment or '' }}</td> - <td>{{ domain.created_at | format_date }}</td> - <td>{{ domain.updated_at | format_date }}</td> + <td data-sort="{{ domain.signup_enabled }}">{% if domain.signup_enabled %}{% trans %}yes{% endtrans %}{% else %}{% trans %}no{% endtrans %}{% endif %}</td> + <td data-order="{{ domain.created_at or '0000-00-00' }}">{{ domain.created_at | format_date }}</td> + <td data-order="{{ domain.updated_at or '0000-00-00' }}">{{ domain.updated_at | format_date }}</td> </tr> {%- endfor %} </tbody> diff --git a/core/admin/mailu/ui/templates/fetch/list.html b/core/admin/mailu/ui/templates/fetch/list.html index a504b7a25..7a527ce88 100644 --- a/core/admin/mailu/ui/templates/fetch/list.html +++ b/core/admin/mailu/ui/templates/fetch/list.html @@ -16,7 +16,7 @@ {%- call macros.table() %} <thead> <tr> - <th>{% trans %}Actions{% endtrans %}</th> + <th data-orderable="false">{% trans %}Actions{% endtrans %}</th> <th>{% trans %}Endpoint{% endtrans %}</th> <th>{% trans %}Username{% endtrans %}</th> <th>{% trans %}Keep emails{% endtrans %}</th> @@ -35,11 +35,11 @@ </td> <td>{{ fetch.protocol }}{{ 's' if fetch.tls else '' }}://{{ fetch.host }}:{{ fetch.port }}</td> <td>{{ fetch.username }}</td> - <td>{% if fetch.keep %}{% trans %}yes{% endtrans %}{% else %}{% trans %}no{% endtrans %}{% endif %}</td> + <td data-sort="{{ fetch.keep }}">{% if fetch.keep %}{% trans %}yes{% endtrans %}{% else %}{% trans %}no{% endtrans %}{% endif %}</td> <td>{{ fetch.last_check | format_datetime or '-' }}</td> <td>{{ fetch.error or '-' }}</td> - <td>{{ fetch.created_at | format_date }}</td> - <td>{{ fetch.updated_at | format_date }}</td> + <td data-sort="{{ fetch.created_at or '0000-00-00' }}">{{ fetch.created_at | format_date }}</td> + <td data-sort="{{ fetch.updated_at or '0000-00-00' }}">{{ fetch.updated_at | format_date }}</td> </tr> {%- endfor %} </tbody> diff --git 
a/core/admin/mailu/ui/templates/macros.html b/core/admin/mailu/ui/templates/macros.html index 46a769914..900842467 100644 --- a/core/admin/mailu/ui/templates/macros.html +++ b/core/admin/mailu/ui/templates/macros.html @@ -86,7 +86,7 @@ <h3 class="card-title">{{ title }}</h3> </div> {%- endmacro %} -{%- macro table(title=None, theme="primary", datatable=True) %} +{%- macro table(title=None, theme="primary", datatable=True, order=None) %} <div class="row"> <div class="col-lg-12"> <div class="card card-outline card-{{ theme }}"> @@ -96,7 +96,7 @@ <h3 class="card-title">{{ title }}</h3> </div> {%- endif %} <div class="card-body"> - <table class="table table-bordered{% if datatable %} dataTable{% endif %}"> + <table class="table table-bordered{% if datatable %} dataTable{% endif %}" data-order="{{ order or '[]' | e }}"> {{- caller() }} </table> </div> diff --git a/core/admin/mailu/ui/templates/manager/list.html b/core/admin/mailu/ui/templates/manager/list.html index 706594c41..95dc9f4a9 100644 --- a/core/admin/mailu/ui/templates/manager/list.html +++ b/core/admin/mailu/ui/templates/manager/list.html @@ -13,10 +13,10 @@ {%- endblock %} {%- block content %} -{%- call macros.table() %} +{%- call macros.table(order='[[2,"asc"]]') %} <thead> <tr> - <th>{% trans %}Actions{% endtrans %}</th> + <th data-orderable="false">{% trans %}Actions{% endtrans %}</th> <th>{% trans %}Email{% endtrans %}</th> </tr> </thead> diff --git a/core/admin/mailu/ui/templates/relay/list.html b/core/admin/mailu/ui/templates/relay/list.html index 1a23ee124..90d13e11f 100644 --- a/core/admin/mailu/ui/templates/relay/list.html +++ b/core/admin/mailu/ui/templates/relay/list.html @@ -11,10 +11,10 @@ {%- endblock %} {%- block content %} -{%- call macros.table() %} +{%- call macros.table(order='[[1,"asc"]]') %} <thead> <tr> - <th>{% trans %}Actions{% endtrans %}</th> + <th data-orderable="false">{% trans %}Actions{% endtrans %}</th> <th>{% trans %}Domain name{% endtrans %}</th> <th>{% trans %}Remote host{% endtrans %}</th> <th>{% trans %}Comment{% endtrans %}</th> @@ -32,8 +32,8 @@ <td>{{ relay.name }}</td> <td>{{ relay.smtp or '-' }}</td> <td>{{ relay.comment or '' }}</td> - <td>{{ relay.created_at | format_date }}</td> - <td>{{ relay.updated_at | format_date }}</td> + <td data-sort="{{ relay.created_at or '0000-00-00' }}">{{ relay.created_at | format_date }}</td> + <td data-sort="{{ relay.updated_at or '0000-00-00' }}">{{ relay.updated_at | format_date }}</td> </tr> {%- endfor %} </tbody> diff --git a/core/admin/mailu/ui/templates/token/list.html b/core/admin/mailu/ui/templates/token/list.html index d7c487378..a6eee9c36 100644 --- a/core/admin/mailu/ui/templates/token/list.html +++ b/core/admin/mailu/ui/templates/token/list.html @@ -16,7 +16,7 @@ {%- call macros.table() %} <thead> <tr> - <th>{% trans %}Actions{% endtrans %}</th> + <th data-orderable="false">{% trans %}Actions{% endtrans %}</th> <th>{% trans %}Comment{% endtrans %}</th> <th>{% trans %}Authorized IP{% endtrans %}</th> <th>{% trans %}Created{% endtrans %}</th> @@ -31,8 +31,8 @@ </td> <td>{{ token.comment }}</td> <td>{{ token.ip or "any" }}</td> - <td>{{ token.created_at | format_date }}</td> - <td>{{ token.updated_at | format_date }}</td> + <td data-sort="{{ token.created_at or '0000-00-00' }}">{{ token.created_at | format_date }}</td> + <td data-sort="{{ token.updated_at or '0000-00-00' }}">{{ token.updated_at | format_date }}</td> </tr> {%- endfor %} </tbody> diff --git a/core/admin/mailu/ui/templates/user/list.html b/core/admin/mailu/ui/templates/user/list.html 
index 7faddab55..146262123 100644 --- a/core/admin/mailu/ui/templates/user/list.html +++ b/core/admin/mailu/ui/templates/user/list.html @@ -16,8 +16,8 @@ {%- call macros.table() %} <thead> <tr> - <th>{% trans %}Actions{% endtrans %}</th> - <th>{% trans %}User settings{% endtrans %}</th> + <th data-orderable="false">{% trans %}Actions{% endtrans %}</th> + <th data-orderable="false">{% trans %}User settings{% endtrans %}</th> <th>{% trans %}Email{% endtrans %}</th> <th>{% trans %}Features{% endtrans %}</th> <th>{% trans %}Quota{% endtrans %}</th> @@ -39,14 +39,14 @@ <a href="{{ url_for('.fetch_list', user_email=user.email) }}" title="{% trans %}Fetched accounts{% endtrans %}"><i class="fa fa-download"></i></a>&nbsp; </td> <td>{{ user }}</td> - <td> + <td data-sort="{{ user.enable_imap*2 + user.enable_pop }}"> {% if user.enable_imap %}<span class="badge bg-info">imap</span>{% endif %} {% if user.enable_pop %}<span class="badge bg-info">pop3</span>{% endif %} </td> - <td>{{ user.quota_bytes_used | filesizeformat }} / {{ (user.quota_bytes | filesizeformat) if user.quota_bytes else '∞' }}</td> + <td data-sort="{{ user.quota_bytes_used }}">{{ user.quota_bytes_used | filesizeformat }} / {{ (user.quota_bytes | filesizeformat) if user.quota_bytes else '∞' }}</td> <td>{{ user.comment or '-' }}</td> - <td>{{ user.created_at | format_date }}</td> - <td>{{ user.updated_at | format_date }}</td> + <td data-sort="{{ user.created_at or '0000-00-00' }}">{{ user.created_at | format_date }}</td> + <td data-sort="{{ user.updated_at or '0000-00-00' }}">{{ user.updated_at | format_date }}</td> </tr> {%- endfor %} </tbody> diff --git a/core/admin/mailu/ui/templates/user/signup_domain.html b/core/admin/mailu/ui/templates/user/signup_domain.html index a7db4c976..519f73ed1 100644 --- a/core/admin/mailu/ui/templates/user/signup_domain.html +++ b/core/admin/mailu/ui/templates/user/signup_domain.html @@ -9,18 +9,22 @@ {%- endblock %} {%- block content %} -{%- call macros.table() %} -<tr> - <th>{% trans %}Domain{% endtrans %}</th> - <th>{% trans %}Available slots{% endtrans %}</th> - <th>{% trans %}Quota{% endtrans %}</th> -</tr> +{%- call macros.table(order='[[1,"asc"]]') %} +<thead> + <tr> + <th>{% trans %}Domain{% endtrans %}</th> + <th>{% trans %}Available slots{% endtrans %}</th> + <th>{% trans %}Quota{% endtrans %}</th> + </tr> +</thead> +<tbody> {%- for domain_name, domain in available_domains.items() %} <tr> <td><a href="{{ url_for('.user_signup', domain_name=domain_name) }}">{{ domain_name }}</a></td> - <td>{{ '∞' if domain.max_users == -1 else domain.max_users - (domain.users | count)}}</td> - <td>{{ domain.max_quota_bytes or config['DEFAULT_QUOTA'] | filesizeformat }}</td> + <td data-sort="{{ -1 if domain.max_users == -1 else domain.max_users - (domain.users | count)}}">{{ '∞' if domain.max_users == -1 else domain.max_users - (domain.users | count)}}</td> + <td data-sort="{{ domain.max_quota_bytes or config['DEFAULT_QUOTA'] }}">{{ domain.max_quota_bytes or config['DEFAULT_QUOTA'] | filesizeformat }}</td> </tr> {%- endfor %} +</tbody> {%- endcall %} {%- endblock %}
blaze__blaze-1359
Unexpected exceptions when combining interactive and non-interactive symbols Exception when combining an `InteractiveSymbol` with a `Symbol`: ``` python In [25]: import blaze as bz In [26]: y = bz.Symbol('y', 'float64') In [29]: iris = bz.Data('./blaze/examples/data/iris.csv') In [30]: iris.sepal_length / y Out[30]: --------------------------------------------------------------------------- ValueError Traceback (most recent call last) /Users/ksmith/anaconda/lib/python2.7/site-packages/IPython/core/formatters.pyc in __call__(self, obj) 695 type_pprinters=self.type_printers, 696 deferred_pprinters=self.deferred_printers) --> 697 printer.pretty(obj) 698 printer.flush() 699 return stream.getvalue() /Users/ksmith/anaconda/lib/python2.7/site-packages/IPython/lib/pretty.pyc in pretty(self, obj) 381 if callable(meth): 382 return meth(obj, self, cycle) --> 383 return _default_pprint(obj, self, cycle) 384 finally: 385 self.end_group() /Users/ksmith/anaconda/lib/python2.7/site-packages/IPython/lib/pretty.pyc in _default_pprint(obj, p, cycle) 501 if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs: 502 # A user-provided repr. Find newlines and replace them with p.break_() --> 503 _repr_pprint(obj, p, cycle) 504 return 505 p.begin_group(1, '<') /Users/ksmith/anaconda/lib/python2.7/site-packages/IPython/lib/pretty.pyc in _repr_pprint(obj, p, cycle) 683 """A pprint that just redirects to the normal repr function.""" 684 # Find newlines and replace them with p.break_() --> 685 output = repr(obj) 686 for idx,output_line in enumerate(output.splitlines()): 687 if idx: /Users/ksmith/work/blaze/blaze-repo/blaze/interactive.pyc in expr_repr(expr, n) 267 isscalar(expr.dshape.measure) or 268 isinstance(expr.dshape.measure, Map))): --> 269 return repr_tables(expr, 10) 270 271 # Smallish arrays /Users/ksmith/work/blaze/blaze-repo/blaze/interactive.pyc in repr_tables(expr, n) 203 204 def repr_tables(expr, n=10): --> 205 result = concrete_head(expr, n).rename(columns={None: ''}) 206 207 if isinstance(result, (DataFrame, Series)): /Users/ksmith/work/blaze/blaze-repo/blaze/interactive.pyc in concrete_head(expr, n) 187 return odo(head, DataFrame) 188 else: --> 189 df = odo(head, DataFrame) 190 df.columns = [expr._name] 191 return df /Users/ksmith/work/odo/odo/odo.pyc in odo(source, target, **kwargs) 89 odo.append.append - Add things onto existing things 90 """ ---> 91 return into(target, source, **kwargs) /Users/ksmith/anaconda/lib/python2.7/site-packages/multipledispatch/dispatcher.pyc in __call__(self, *args, **kwargs) 162 self._cache[types] = func 163 try: --> 164 return func(*args, **kwargs) 165 166 except MDNotImplementedError: /Users/ksmith/work/blaze/blaze-repo/blaze/interactive.pyc in into(a, b, **kwargs) 311 @dispatch((object, type, str, unicode), Expr) 312 def into(a, b, **kwargs): --> 313 result = compute(b, **kwargs) 314 kwargs['dshape'] = b.dshape 315 return into(a, result, **kwargs) /Users/ksmith/anaconda/lib/python2.7/site-packages/multipledispatch/dispatcher.pyc in __call__(self, *args, **kwargs) 162 self._cache[types] = func 163 try: --> 164 return func(*args, **kwargs) 165 166 except MDNotImplementedError: /Users/ksmith/work/blaze/blaze-repo/blaze/interactive.pyc in compute(expr, **kwargs) 170 raise ValueError("No data resources found") 171 else: --> 172 return compute(expr, resources, **kwargs) 173 174 /Users/ksmith/anaconda/lib/python2.7/site-packages/multipledispatch/dispatcher.pyc in __call__(self, *args, **kwargs) 162 self._cache[types] = func 163 try: --> 164 return func(*args, 
**kwargs) 165 166 except MDNotImplementedError: /Users/ksmith/work/blaze/blaze-repo/blaze/compute/core.pyc in compute(expr, d, **kwargs) 401 d4 = d3 402 --> 403 result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs) 404 if post_compute_: 405 result = post_compute_(expr3, result, scope=d4) /Users/ksmith/work/blaze/blaze-repo/blaze/compute/core.pyc in top_then_bottom_then_top_again_etc(expr, scope, **kwargs) 165 166 # 2. Compute from the bottom until there is a data type change --> 167 expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs) 168 169 # 3. Re-optimize data and expressions /Users/ksmith/work/blaze/blaze-repo/blaze/compute/core.pyc in bottom_up_until_type_break(expr, scope, **kwargs) 306 # (this is the bottom part of bottom up) 307 exprs, new_scopes = zip(*[bottom_up_until_type_break(i, scope, **kwargs) --> 308 for i in inputs]) 309 310 # 2. Form new (much shallower) expression and new (more computed) scope /Users/ksmith/work/blaze/blaze-repo/blaze/compute/core.pyc in bottom_up_until_type_break(expr, scope, **kwargs) 306 # (this is the bottom part of bottom up) 307 exprs, new_scopes = zip(*[bottom_up_until_type_break(i, scope, **kwargs) --> 308 for i in inputs]) 309 310 # 2. Form new (much shallower) expression and new (more computed) scope /Users/ksmith/work/blaze/blaze-repo/blaze/compute/core.pyc in bottom_up_until_type_break(expr, scope, **kwargs) 306 # (this is the bottom part of bottom up) 307 exprs, new_scopes = zip(*[bottom_up_until_type_break(i, scope, **kwargs) --> 308 for i in inputs]) 309 310 # 2. Form new (much shallower) expression and new (more computed) scope ValueError: need more than 0 values to unpack ``` What should be the result here? To my mind, it should be another expression that's partially bound--`iris` is a bound symbol, `y` is unbound. It can't be evaluated because `y` is unbound. Getting an exception here is unexpected. This works: ``` python In [34]: bz.compute(iris.sepal_length / y, {y: 2.}) Out[34]: 0 2.55 1 2.45 2 2.35 3 2.30 4 2.50 5 2.70 ... Name: sepal_length, dtype: float64 ``` But this doesn't: ``` python In [35]: bz.compute(bz.transform(iris, ratio=iris.sepal_length / y), {y: 2}) --------------------------------------------------------------------------- KeyError Traceback (most recent call last) <ipython-input-35-26dd478b38c1> in <module>() ----> 1 bz.compute(bz.transform(iris, ratio=iris.sepal_length / y), {y: 2}) /Users/ksmith/anaconda/lib/python2.7/site-packages/multipledispatch/dispatcher.pyc in __call__(self, *args, **kwargs) 162 self._cache[types] = func 163 try: --> 164 return func(*args, **kwargs) 165 166 except MDNotImplementedError: /Users/ksmith/work/blaze/blaze-repo/blaze/compute/core.pyc in compute(expr, d, **kwargs) 401 d4 = d3 402 --> 403 result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs) 404 if post_compute_: 405 result = post_compute_(expr3, result, scope=d4) /Users/ksmith/work/blaze/blaze-repo/blaze/compute/core.pyc in top_then_bottom_then_top_again_etc(expr, scope, **kwargs) 179 try: 180 expr3 = optimize_(expr2, *[scope3[leaf] --> 181 for leaf in expr2._leaves()]) 182 _d = dict(zip(expr2._leaves(), expr3._leaves())) 183 scope4 = dict((e._subs(_d), d) for e, d in scope3.items()) KeyError: y ```
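A hedged sketch of the behaviour the report argues for, using only names that appear elsewhere in this record (`_leaves()` shows up in the traceback, `_resources()` in `interactive.py`); the comments state expected results, not verified output.

```python
# Sketch only, not verified against a live blaze install.
import blaze as bz

y = bz.Symbol('y', 'float64')
iris = bz.Data('./blaze/examples/data/iris.csv')

expr = bz.transform(iris, ratio=iris.sepal_length / y)

# Leaves with no attached data resource are the ones the caller still has to bind;
# here that should be just `y`, since `iris` is bound to the CSV file.
unbound = [leaf for leaf in expr._leaves() if not leaf._resources()]

# Per the report, supplying the one free symbol ought to be enough to compute,
# but this currently raises KeyError: y.
result = bz.compute(expr, {y: 2.0})
```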
[ { "content": "from __future__ import absolute_import, division, print_function\n\nfrom collections import Iterator\nimport decimal\nimport datetime\nfrom functools import reduce, partial\nimport itertools\nimport operator\nimport warnings\n\nfrom collections import Iterator\nfrom functools import reduce\n\nimport datashape\nfrom datashape import discover, Tuple, Record, DataShape, var, Map\nfrom datashape.predicates import iscollection, isscalar, isrecord, istabular\nimport numpy as np\nfrom odo import resource, odo\nfrom odo.utils import ignoring, copydoc\nfrom odo.compatibility import unicode\nfrom pandas import DataFrame, Series, Timestamp\n\n\nfrom .expr import Expr, Symbol, ndim\nfrom .dispatch import dispatch\nfrom .compatibility import _strtypes\n\n\n__all__ = ['Data', 'Table', 'into', 'to_html']\n\n\nnames = ('_%d' % i for i in itertools.count(1))\nnot_an_iterator = []\n\n\nwith ignoring(ImportError):\n import bcolz\n not_an_iterator.append(bcolz.carray)\n\n\nwith ignoring(ImportError):\n import pymongo\n not_an_iterator.append(pymongo.collection.Collection)\n not_an_iterator.append(pymongo.database.Database)\n\n\nclass InteractiveSymbol(Symbol):\n \"\"\"Interactive data.\n\n The ``Data`` object presents a familiar view onto a variety of forms of\n data. This user-level object provides an interactive experience to using\n Blaze's abstract expressions.\n\n Parameters\n ----------\n data : object\n Any type with ``discover`` and ``compute`` implementations\n fields : list, optional\n Field or column names, will be inferred from datasource if possible\n dshape : str or DataShape, optional\n DataShape describing input data\n name : str, optional\n A name for the data.\n\n Examples\n --------\n >>> t = Data([(1, 'Alice', 100),\n ... (2, 'Bob', -200),\n ... (3, 'Charlie', 300),\n ... (4, 'Denis', 400),\n ... (5, 'Edith', -500)],\n ... 
fields=['id', 'name', 'balance'])\n >>> t[t.balance < 0].name\n name\n 0 Bob\n 1 Edith\n \"\"\"\n __slots__ = '_hash', 'data', 'dshape', '_name'\n\n def __init__(self, data, dshape, name=None):\n self.data = data\n self.dshape = dshape\n self._name = name or (next(names)\n if isrecord(dshape.measure)\n else None)\n self._hash = None\n\n def _resources(self):\n return {self: self.data}\n\n @property\n def _hashargs(self):\n data = self.data\n try:\n # cannot use isinstance(data, Hashable)\n # some classes give a false positive\n hash(data)\n except TypeError:\n data = id(data)\n return data, self.dshape, self._name\n\n\n@copydoc(InteractiveSymbol)\ndef Data(data, dshape=None, name=None, fields=None, schema=None, **kwargs):\n if schema and dshape:\n raise ValueError(\"Please specify one of schema= or dshape= keyword\"\n \" arguments\")\n\n if isinstance(data, InteractiveSymbol):\n return Data(data.data, dshape, name, fields, schema, **kwargs)\n\n if isinstance(data, _strtypes):\n data = resource(data, schema=schema, dshape=dshape, **kwargs)\n if (isinstance(data, Iterator) and\n not isinstance(data, tuple(not_an_iterator))):\n data = tuple(data)\n if schema and not dshape:\n dshape = var * schema\n if dshape and isinstance(dshape, _strtypes):\n dshape = datashape.dshape(dshape)\n if not dshape:\n dshape = discover(data)\n types = None\n if isinstance(dshape.measure, Tuple) and fields:\n types = dshape[1].dshapes\n schema = Record(list(zip(fields, types)))\n dshape = DataShape(*(dshape.shape + (schema,)))\n elif isscalar(dshape.measure) and fields:\n types = (dshape.measure,) * int(dshape[-2])\n schema = Record(list(zip(fields, types)))\n dshape = DataShape(*(dshape.shape[:-1] + (schema,)))\n elif isrecord(dshape.measure) and fields:\n ds = discover(data)\n assert isrecord(ds.measure)\n names = ds.measure.names\n if names != fields:\n raise ValueError('data column names %s\\n'\n '\\tnot equal to fields parameter %s,\\n'\n '\\tuse Data(data).relabel(%s) to rename '\n 'fields' % (names,\n fields,\n ', '.join('%s=%r' % (k, v)\n for k, v in\n zip(names, fields))))\n types = dshape.measure.types\n schema = Record(list(zip(fields, types)))\n dshape = DataShape(*(dshape.shape + (schema,)))\n\n ds = datashape.dshape(dshape)\n return InteractiveSymbol(data, ds, name)\n\n\ndef Table(*args, **kwargs):\n \"\"\" Deprecated, see Data instead \"\"\"\n warnings.warn(\"Table is deprecated, use Data instead\",\n DeprecationWarning)\n return Data(*args, **kwargs)\n\n\n@dispatch(InteractiveSymbol, dict)\ndef _subs(o, d):\n return o\n\n\n@dispatch(Expr)\ndef compute(expr, **kwargs):\n resources = expr._resources()\n if not resources:\n raise ValueError(\"No data resources found\")\n else:\n return compute(expr, resources, **kwargs)\n\n\ndef concrete_head(expr, n=10):\n \"\"\" Return head of computed expression \"\"\"\n if not expr._resources():\n raise ValueError(\"Expression does not contain data resources\")\n if not iscollection(expr.dshape):\n return compute(expr)\n\n head = expr.head(n + 1)\n\n if not iscollection(expr.dshape):\n return odo(head, object)\n elif isrecord(expr.dshape.measure):\n return odo(head, DataFrame)\n else:\n df = odo(head, DataFrame)\n df.columns = [expr._name]\n return df\n result = compute(head)\n\n if len(result) == 0:\n return DataFrame(columns=expr.fields)\n if isrecord(expr.dshape.measure):\n return odo(result, DataFrame, dshape=expr.dshape)\n else:\n df = odo(result, DataFrame, dshape=expr.dshape)\n df.columns = [expr._name]\n return df\n\n\ndef repr_tables(expr, n=10):\n result 
= concrete_head(expr, n).rename(columns={None: ''})\n\n if isinstance(result, (DataFrame, Series)):\n s = repr(result)\n if len(result) > 10:\n s = '\\n'.join(s.split('\\n')[:-1]) + '\\n...'\n return s\n else:\n return repr(result) # pragma: no cover\n\n\ndef numel(shape):\n if var in shape:\n return None\n if not shape:\n return 1\n return reduce(operator.mul, shape, 1)\n\n\ndef short_dshape(ds, nlines=5):\n s = datashape.coretypes.pprint(ds)\n lines = s.split('\\n')\n if len(lines) > 5:\n s = '\\n'.join(lines[:nlines]) + '\\n ...'\n return s\n\n\ndef coerce_to(typ, x, odo_kwargs=None):\n try:\n return typ(x)\n except TypeError:\n return odo(x, typ, **(odo_kwargs or {}))\n\n\ndef coerce_scalar(result, dshape, odo_kwargs=None):\n coerce_ = partial(coerce_to, x=result, odo_kwargs=odo_kwargs)\n if 'float' in dshape:\n return coerce_(float)\n if 'decimal' in dshape:\n return coerce_(decimal.Decimal)\n elif 'int' in dshape:\n return coerce_(int)\n elif 'bool' in dshape:\n return coerce_(bool)\n elif 'datetime' in dshape:\n return coerce_(Timestamp)\n elif 'date' in dshape:\n return coerce_(datetime.date)\n else:\n return result\n\n\ndef expr_repr(expr, n=10):\n # Pure Expressions, not interactive\n if not expr._resources():\n return str(expr)\n\n # Scalars\n if ndim(expr) == 0 and isscalar(expr.dshape):\n return repr(coerce_scalar(compute(expr), str(expr.dshape)))\n\n # Tables\n if (ndim(expr) == 1 and (istabular(expr.dshape) or\n isscalar(expr.dshape.measure) or\n isinstance(expr.dshape.measure, Map))):\n return repr_tables(expr, 10)\n\n # Smallish arrays\n if ndim(expr) >= 2 and numel(expr.shape) and numel(expr.shape) < 1000000:\n return repr(compute(expr))\n\n # Other\n dat = expr._resources().values()\n if len(dat) == 1:\n dat = list(dat)[0] # may be dict_values\n\n s = 'Data: %s' % dat\n if not isinstance(expr, Symbol):\n s += '\\nExpr: %s' % str(expr)\n s += '\\nDataShape: %s' % short_dshape(expr.dshape, nlines=7)\n\n return s\n\n\n@dispatch(DataFrame)\ndef to_html(df):\n return df.to_html()\n\n\n@dispatch(Expr)\ndef to_html(expr):\n # Tables\n if not expr._resources() or ndim(expr) != 1:\n return to_html(repr(expr))\n return to_html(concrete_head(expr))\n\n\n@dispatch(object)\ndef to_html(o):\n return repr(o)\n\n\n@dispatch(_strtypes)\ndef to_html(o):\n return o.replace('\\n', '<br>')\n\n\n@dispatch((object, type, str, unicode), Expr)\ndef into(a, b, **kwargs):\n result = compute(b, **kwargs)\n kwargs['dshape'] = b.dshape\n return into(a, result, **kwargs)\n\n\ndef table_length(expr):\n try:\n return expr._len()\n except ValueError:\n return int(expr.count())\n\n\nExpr.__repr__ = expr_repr\nExpr._repr_html_ = lambda x: to_html(x)\nExpr.__len__ = table_length\n\n\ndef intonumpy(data, dtype=None, **kwargs):\n # TODO: Don't ignore other kwargs like copy\n result = odo(data, np.ndarray)\n if dtype and result.dtype != dtype:\n result = result.astype(dtype)\n return result\n\n\ndef convert_base(typ, x):\n x = compute(x)\n try:\n return typ(x)\n except:\n return typ(odo(x, typ))\n\nExpr.__array__ = intonumpy\nExpr.__int__ = lambda x: convert_base(int, x)\nExpr.__float__ = lambda x: convert_base(float, x)\nExpr.__complex__ = lambda x: convert_base(complex, x)\nExpr.__bool__ = lambda x: convert_base(bool, x)\nExpr.__nonzero__ = lambda x: convert_base(bool, x)\nExpr.__iter__ = into(Iterator)\n", "path": "blaze/interactive.py" } ]
[ { "content": "from __future__ import absolute_import, division, print_function\n\nfrom collections import Iterator\nimport decimal\nimport datetime\nfrom functools import reduce, partial\nimport itertools\nimport operator\nimport warnings\n\nfrom collections import Iterator\nfrom functools import reduce\n\nimport datashape\nfrom datashape import discover, Tuple, Record, DataShape, var, Map\nfrom datashape.predicates import iscollection, isscalar, isrecord, istabular\nimport numpy as np\nfrom odo import resource, odo\nfrom odo.utils import ignoring, copydoc\nfrom odo.compatibility import unicode\nfrom pandas import DataFrame, Series, Timestamp\n\n\nfrom .expr import Expr, Symbol, ndim\nfrom .dispatch import dispatch\nfrom .compatibility import _strtypes\n\n\n__all__ = ['Data', 'Table', 'into', 'to_html']\n\n\nnames = ('_%d' % i for i in itertools.count(1))\nnot_an_iterator = []\n\n\nwith ignoring(ImportError):\n import bcolz\n not_an_iterator.append(bcolz.carray)\n\n\nwith ignoring(ImportError):\n import pymongo\n not_an_iterator.append(pymongo.collection.Collection)\n not_an_iterator.append(pymongo.database.Database)\n\n\nclass InteractiveSymbol(Symbol):\n \"\"\"Interactive data.\n\n The ``Data`` object presents a familiar view onto a variety of forms of\n data. This user-level object provides an interactive experience to using\n Blaze's abstract expressions.\n\n Parameters\n ----------\n data : object\n Any type with ``discover`` and ``compute`` implementations\n fields : list, optional\n Field or column names, will be inferred from datasource if possible\n dshape : str or DataShape, optional\n DataShape describing input data\n name : str, optional\n A name for the data.\n\n Examples\n --------\n >>> t = Data([(1, 'Alice', 100),\n ... (2, 'Bob', -200),\n ... (3, 'Charlie', 300),\n ... (4, 'Denis', 400),\n ... (5, 'Edith', -500)],\n ... 
fields=['id', 'name', 'balance'])\n >>> t[t.balance < 0].name\n name\n 0 Bob\n 1 Edith\n \"\"\"\n __slots__ = '_hash', 'data', 'dshape', '_name'\n\n def __init__(self, data, dshape, name=None):\n self.data = data\n self.dshape = dshape\n self._name = name or (next(names)\n if isrecord(dshape.measure)\n else None)\n self._hash = None\n\n def _resources(self):\n return {self: self.data}\n\n @property\n def _hashargs(self):\n data = self.data\n try:\n # cannot use isinstance(data, Hashable)\n # some classes give a false positive\n hash(data)\n except TypeError:\n data = id(data)\n return data, self.dshape, self._name\n\n\n@copydoc(InteractiveSymbol)\ndef Data(data, dshape=None, name=None, fields=None, schema=None, **kwargs):\n if schema and dshape:\n raise ValueError(\"Please specify one of schema= or dshape= keyword\"\n \" arguments\")\n\n if isinstance(data, InteractiveSymbol):\n return Data(data.data, dshape, name, fields, schema, **kwargs)\n\n if isinstance(data, _strtypes):\n data = resource(data, schema=schema, dshape=dshape, **kwargs)\n if (isinstance(data, Iterator) and\n not isinstance(data, tuple(not_an_iterator))):\n data = tuple(data)\n if schema and not dshape:\n dshape = var * schema\n if dshape and isinstance(dshape, _strtypes):\n dshape = datashape.dshape(dshape)\n if not dshape:\n dshape = discover(data)\n types = None\n if isinstance(dshape.measure, Tuple) and fields:\n types = dshape[1].dshapes\n schema = Record(list(zip(fields, types)))\n dshape = DataShape(*(dshape.shape + (schema,)))\n elif isscalar(dshape.measure) and fields:\n types = (dshape.measure,) * int(dshape[-2])\n schema = Record(list(zip(fields, types)))\n dshape = DataShape(*(dshape.shape[:-1] + (schema,)))\n elif isrecord(dshape.measure) and fields:\n ds = discover(data)\n assert isrecord(ds.measure)\n names = ds.measure.names\n if names != fields:\n raise ValueError('data column names %s\\n'\n '\\tnot equal to fields parameter %s,\\n'\n '\\tuse Data(data).relabel(%s) to rename '\n 'fields' % (names,\n fields,\n ', '.join('%s=%r' % (k, v)\n for k, v in\n zip(names, fields))))\n types = dshape.measure.types\n schema = Record(list(zip(fields, types)))\n dshape = DataShape(*(dshape.shape + (schema,)))\n\n ds = datashape.dshape(dshape)\n return InteractiveSymbol(data, ds, name)\n\n\ndef Table(*args, **kwargs):\n \"\"\" Deprecated, see Data instead \"\"\"\n warnings.warn(\"Table is deprecated, use Data instead\",\n DeprecationWarning)\n return Data(*args, **kwargs)\n\n\n@dispatch(InteractiveSymbol, dict)\ndef _subs(o, d):\n return o\n\n\n@dispatch(Expr)\ndef compute(expr, **kwargs):\n resources = expr._resources()\n if not resources:\n raise ValueError(\"No data resources found\")\n else:\n return compute(expr, resources, **kwargs)\n\n\ndef concrete_head(expr, n=10):\n \"\"\" Return head of computed expression \"\"\"\n if not expr._resources():\n raise ValueError(\"Expression does not contain data resources\")\n if not iscollection(expr.dshape):\n return compute(expr)\n\n head = expr.head(n + 1)\n\n if not iscollection(expr.dshape):\n return odo(head, object)\n elif isrecord(expr.dshape.measure):\n return odo(head, DataFrame)\n else:\n df = odo(head, DataFrame)\n df.columns = [expr._name]\n return df\n result = compute(head)\n\n if len(result) == 0:\n return DataFrame(columns=expr.fields)\n if isrecord(expr.dshape.measure):\n return odo(result, DataFrame, dshape=expr.dshape)\n else:\n df = odo(result, DataFrame, dshape=expr.dshape)\n df.columns = [expr._name]\n return df\n\n\ndef repr_tables(expr, n=10):\n result 
= concrete_head(expr, n).rename(columns={None: ''})\n\n if isinstance(result, (DataFrame, Series)):\n s = repr(result)\n if len(result) > 10:\n s = '\\n'.join(s.split('\\n')[:-1]) + '\\n...'\n return s\n else:\n return repr(result) # pragma: no cover\n\n\ndef numel(shape):\n if var in shape:\n return None\n if not shape:\n return 1\n return reduce(operator.mul, shape, 1)\n\n\ndef short_dshape(ds, nlines=5):\n s = datashape.coretypes.pprint(ds)\n lines = s.split('\\n')\n if len(lines) > 5:\n s = '\\n'.join(lines[:nlines]) + '\\n ...'\n return s\n\n\ndef coerce_to(typ, x, odo_kwargs=None):\n try:\n return typ(x)\n except TypeError:\n return odo(x, typ, **(odo_kwargs or {}))\n\n\ndef coerce_scalar(result, dshape, odo_kwargs=None):\n coerce_ = partial(coerce_to, x=result, odo_kwargs=odo_kwargs)\n if 'float' in dshape:\n return coerce_(float)\n if 'decimal' in dshape:\n return coerce_(decimal.Decimal)\n elif 'int' in dshape:\n return coerce_(int)\n elif 'bool' in dshape:\n return coerce_(bool)\n elif 'datetime' in dshape:\n return coerce_(Timestamp)\n elif 'date' in dshape:\n return coerce_(datetime.date)\n else:\n return result\n\n\ndef expr_repr(expr, n=10):\n # Pure Expressions, not interactive\n if not set(expr._resources().keys()).issuperset(expr._leaves()):\n return str(expr)\n\n # Scalars\n if ndim(expr) == 0 and isscalar(expr.dshape):\n return repr(coerce_scalar(compute(expr), str(expr.dshape)))\n\n # Tables\n if (ndim(expr) == 1 and (istabular(expr.dshape) or\n isscalar(expr.dshape.measure) or\n isinstance(expr.dshape.measure, Map))):\n return repr_tables(expr, 10)\n\n # Smallish arrays\n if ndim(expr) >= 2 and numel(expr.shape) and numel(expr.shape) < 1000000:\n return repr(compute(expr))\n\n # Other\n dat = expr._resources().values()\n if len(dat) == 1:\n dat = list(dat)[0] # may be dict_values\n\n s = 'Data: %s' % dat\n if not isinstance(expr, Symbol):\n s += '\\nExpr: %s' % str(expr)\n s += '\\nDataShape: %s' % short_dshape(expr.dshape, nlines=7)\n\n return s\n\n\n@dispatch(DataFrame)\ndef to_html(df):\n return df.to_html()\n\n\n@dispatch(Expr)\ndef to_html(expr):\n # Tables\n if not expr._resources() or ndim(expr) != 1:\n return to_html(repr(expr))\n return to_html(concrete_head(expr))\n\n\n@dispatch(object)\ndef to_html(o):\n return repr(o)\n\n\n@dispatch(_strtypes)\ndef to_html(o):\n return o.replace('\\n', '<br>')\n\n\n@dispatch((object, type, str, unicode), Expr)\ndef into(a, b, **kwargs):\n result = compute(b, **kwargs)\n kwargs['dshape'] = b.dshape\n return into(a, result, **kwargs)\n\n\ndef table_length(expr):\n try:\n return expr._len()\n except ValueError:\n return int(expr.count())\n\n\nExpr.__repr__ = expr_repr\nExpr._repr_html_ = lambda x: to_html(x)\nExpr.__len__ = table_length\n\n\ndef intonumpy(data, dtype=None, **kwargs):\n # TODO: Don't ignore other kwargs like copy\n result = odo(data, np.ndarray)\n if dtype and result.dtype != dtype:\n result = result.astype(dtype)\n return result\n\n\ndef convert_base(typ, x):\n x = compute(x)\n try:\n return typ(x)\n except:\n return typ(odo(x, typ))\n\nExpr.__array__ = intonumpy\nExpr.__int__ = lambda x: convert_base(int, x)\nExpr.__float__ = lambda x: convert_base(float, x)\nExpr.__complex__ = lambda x: convert_base(complex, x)\nExpr.__bool__ = lambda x: convert_base(bool, x)\nExpr.__nonzero__ = lambda x: convert_base(bool, x)\nExpr.__iter__ = into(Iterator)\n", "path": "blaze/interactive.py" } ]
diff --git a/blaze/interactive.py b/blaze/interactive.py index 64b5e75a2..cce296a57 100644 --- a/blaze/interactive.py +++ b/blaze/interactive.py @@ -255,7 +255,7 @@ def coerce_scalar(result, dshape, odo_kwargs=None): def expr_repr(expr, n=10): # Pure Expressions, not interactive - if not expr._resources(): + if not set(expr._resources().keys()).issuperset(expr._leaves()): return str(expr) # Scalars diff --git a/blaze/tests/test_interactive.py b/blaze/tests/test_interactive.py index 1130ade83..c868f613c 100644 --- a/blaze/tests/test_interactive.py +++ b/blaze/tests/test_interactive.py @@ -459,3 +459,16 @@ def test_pickle_roundtrip(): def test_nameless_data(): data = [('a', 1)] assert repr(data) in repr(Data(data)) + + +def test_partially_bound_expr(): + df = pd.DataFrame([(1, 'Alice', 100), + (2, 'Bob', -200), + (3, 'Charlie', 300), + (4, 'Denis', 400), + (5, 'Edith', -500)], + columns=['id', 'name', 'balance']) + data = Data(df, name='data') + a = symbol('a', 'int') + expr = data.name[data.balance > a] + assert repr(expr) == 'data[data.balance > a].name'
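The test added in the diff above is the clearest illustration of the change: an expression can mix a data-backed interactive symbol with a plain unbound `symbol`, so a non-empty `_resources()` dict is not enough to treat the expression as fully bound. Below is a minimal sketch of that situation, assuming a working blaze install; the names mirror `test_partially_bound_expr` from the diff, while the top-level `from blaze import ...` imports are an assumption on my part.

```python
import pandas as pd
from blaze import Data, symbol

# A data-backed (interactive) symbol: its resource is the DataFrame itself.
df = pd.DataFrame([(1, 'Alice', 100), (2, 'Bob', -200), (5, 'Edith', -500)],
                  columns=['id', 'name', 'balance'])
data = Data(df, name='data')

# A plain symbol with no data attached.
a = symbol('a', 'int')

# The combined expression has one bound leaf (data) and one unbound leaf (a),
# so expr._resources() is non-empty even though it cannot be computed yet.
expr = data.name[data.balance > a]

# With the superset-of-leaves check, repr() falls back to the symbolic form
# instead of trying to compute a partially bound expression.
print(repr(expr))  # per the new test: data[data.balance > a].name
```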
marshmallow-code__webargs-482
Fix simple typo: objec -> object

There is a small typo in src/webargs/flaskparser.py. Should read `object` rather than `objec`.
[ { "content": "\"\"\"Flask request argument parsing module.\n\nExample: ::\n\n from flask import Flask\n\n from webargs import fields\n from webargs.flaskparser import use_args\n\n app = Flask(__name__)\n\n hello_args = {\n 'name': fields.Str(required=True)\n }\n\n @app.route('/')\n @use_args(hello_args)\n def index(args):\n return 'Hello ' + args['name']\n\"\"\"\nimport flask\nfrom werkzeug.exceptions import HTTPException\n\nfrom webargs import core\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.multidictproxy import MultiDictProxy\n\n\ndef abort(http_status_code, exc=None, **kwargs):\n \"\"\"Raise a HTTPException for the given http_status_code. Attach any keyword\n arguments to the exception for later processing.\n\n From Flask-Restful. See NOTICE file for license information.\n \"\"\"\n try:\n flask.abort(http_status_code)\n except HTTPException as err:\n err.data = kwargs\n err.exc = exc\n raise err\n\n\ndef is_json_request(req):\n return core.is_json(req.mimetype)\n\n\nclass FlaskParser(core.Parser):\n \"\"\"Flask request argument parser.\"\"\"\n\n __location_map__ = dict(\n view_args=\"load_view_args\",\n path=\"load_view_args\",\n **core.Parser.__location_map__,\n )\n\n def _raw_load_json(self, req):\n \"\"\"Return a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req):\n return core.missing\n\n return core.parse_json(req.get_data(cache=True))\n\n def _handle_invalid_json_error(self, error, req, *args, **kwargs):\n abort(400, exc=error, messages={\"json\": [\"Invalid JSON body.\"]})\n\n def load_view_args(self, req, schema):\n \"\"\"Return the request's ``view_args`` or ``missing`` if there are none.\"\"\"\n return req.view_args or core.missing\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.args, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.form, schema)\n\n def load_headers(self, req, schema):\n \"\"\"Return headers from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.headers, schema)\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n return req.cookies\n\n def load_files(self, req, schema):\n \"\"\"Return files from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.files, schema)\n\n def handle_error(self, error, req, schema, *, error_status_code, error_headers):\n \"\"\"Handles errors during parsing. 
Aborts the current HTTP request and\n responds with a 422 error.\n \"\"\"\n status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS\n # on marshmallow 2, a many schema receiving a non-list value will\n # produce this specific error back -- reformat it to match the\n # marshmallow 3 message so that Flask can properly encode it\n messages = error.messages\n if (\n MARSHMALLOW_VERSION_INFO[0] < 3\n and schema.many\n and messages == {0: {}, \"_schema\": [\"Invalid input type.\"]}\n ):\n messages.pop(0)\n abort(\n status_code,\n exc=error,\n messages=error.messages,\n schema=schema,\n headers=error_headers,\n )\n\n def get_default_request(self):\n \"\"\"Override to use Flask's thread-local request objec by default\"\"\"\n return flask.request\n\n\nparser = FlaskParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/flaskparser.py" } ]
[ { "content": "\"\"\"Flask request argument parsing module.\n\nExample: ::\n\n from flask import Flask\n\n from webargs import fields\n from webargs.flaskparser import use_args\n\n app = Flask(__name__)\n\n hello_args = {\n 'name': fields.Str(required=True)\n }\n\n @app.route('/')\n @use_args(hello_args)\n def index(args):\n return 'Hello ' + args['name']\n\"\"\"\nimport flask\nfrom werkzeug.exceptions import HTTPException\n\nfrom webargs import core\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.multidictproxy import MultiDictProxy\n\n\ndef abort(http_status_code, exc=None, **kwargs):\n \"\"\"Raise a HTTPException for the given http_status_code. Attach any keyword\n arguments to the exception for later processing.\n\n From Flask-Restful. See NOTICE file for license information.\n \"\"\"\n try:\n flask.abort(http_status_code)\n except HTTPException as err:\n err.data = kwargs\n err.exc = exc\n raise err\n\n\ndef is_json_request(req):\n return core.is_json(req.mimetype)\n\n\nclass FlaskParser(core.Parser):\n \"\"\"Flask request argument parser.\"\"\"\n\n __location_map__ = dict(\n view_args=\"load_view_args\",\n path=\"load_view_args\",\n **core.Parser.__location_map__,\n )\n\n def _raw_load_json(self, req):\n \"\"\"Return a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req):\n return core.missing\n\n return core.parse_json(req.get_data(cache=True))\n\n def _handle_invalid_json_error(self, error, req, *args, **kwargs):\n abort(400, exc=error, messages={\"json\": [\"Invalid JSON body.\"]})\n\n def load_view_args(self, req, schema):\n \"\"\"Return the request's ``view_args`` or ``missing`` if there are none.\"\"\"\n return req.view_args or core.missing\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.args, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.form, schema)\n\n def load_headers(self, req, schema):\n \"\"\"Return headers from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.headers, schema)\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n return req.cookies\n\n def load_files(self, req, schema):\n \"\"\"Return files from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.files, schema)\n\n def handle_error(self, error, req, schema, *, error_status_code, error_headers):\n \"\"\"Handles errors during parsing. 
Aborts the current HTTP request and\n responds with a 422 error.\n \"\"\"\n status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS\n # on marshmallow 2, a many schema receiving a non-list value will\n # produce this specific error back -- reformat it to match the\n # marshmallow 3 message so that Flask can properly encode it\n messages = error.messages\n if (\n MARSHMALLOW_VERSION_INFO[0] < 3\n and schema.many\n and messages == {0: {}, \"_schema\": [\"Invalid input type.\"]}\n ):\n messages.pop(0)\n abort(\n status_code,\n exc=error,\n messages=error.messages,\n schema=schema,\n headers=error_headers,\n )\n\n def get_default_request(self):\n \"\"\"Override to use Flask's thread-local request object by default\"\"\"\n return flask.request\n\n\nparser = FlaskParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/flaskparser.py" } ]
diff --git a/AUTHORS.rst b/AUTHORS.rst index cd22a8c4..0672ba3d 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -47,3 +47,4 @@ Contributors (chronological) * Nate Dellinger `@Nateyo <https://github.com/Nateyo>`_ * Karthikeyan Singaravelan `@tirkarthi <https://github.com/tirkarthi>`_ * Sami Salonen `@suola <https://github.com/suola>`_ +* Tim Gates `@timgates42 <https://github.com/timgates42>`_ diff --git a/src/webargs/flaskparser.py b/src/webargs/flaskparser.py index d628b9ac..b67976ef 100644 --- a/src/webargs/flaskparser.py +++ b/src/webargs/flaskparser.py @@ -114,7 +114,7 @@ def handle_error(self, error, req, schema, *, error_status_code, error_headers): ) def get_default_request(self): - """Override to use Flask's thread-local request objec by default""" + """Override to use Flask's thread-local request object by default""" return flask.request
DataBiosphere__toil-3070
Progress bar is cool but...

It requires the terminal to be `reset` when run in a screen session.

Also, for cactus anyway, it spends the vast majority of the runtime at 99%/100%.

┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-558)
┆Issue Number: TOIL-558
[ { "content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom setuptools import find_packages, setup\nimport os\n\n\ndef runSetup():\n \"\"\"\n Calls setup(). This function exists so the setup() invocation preceded more internal\n functionality. The `version` module is imported dynamically by importVersion() below.\n \"\"\"\n boto = 'boto==2.48.0'\n boto3 = 'boto3>=1.7.50, <2.0'\n futures = 'futures==3.1.1'\n pycryptodome = 'pycryptodome==3.5.1'\n pymesos = 'pymesos==0.3.15'\n psutil = 'psutil >= 3.0.1, <6'\n pynacl = 'pynacl==1.3.0'\n gcs = 'google-cloud-storage==1.6.0'\n gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'\n apacheLibcloud = 'apache-libcloud==2.2.1'\n cwltool = 'cwltool==3.0.20200324120055'\n galaxyToolUtil = 'galaxy-tool-util'\n htcondor = 'htcondor>=8.6.0'\n kubernetes = 'kubernetes>=10, <11'\n idna = 'idna>=2'\n pytz = 'pytz>=2012'\n dill = 'dill==0.3.1.1'\n six = 'six>=1.10.0'\n future = 'future'\n requests = 'requests>=2, <3'\n docker = 'docker==2.5.1'\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n pathlib2 = 'pathlib2==2.3.2'\n enlighten = 'enlighten>=1.5.1, <2'\n\n core_reqs = [\n dill,\n six,\n future,\n requests,\n docker,\n dateutil,\n psutil,\n addict,\n pathlib2,\n pytz,\n enlighten]\n\n aws_reqs = [\n boto,\n boto3,\n futures,\n pycryptodome]\n cwl_reqs = [\n cwltool,\n galaxyToolUtil]\n encryption_reqs = [\n pynacl]\n google_reqs = [\n gcs_oauth2_boto_plugin, # is this being used??\n apacheLibcloud,\n gcs]\n htcondor_reqs = [\n htcondor]\n kubernetes_reqs = [\n kubernetes,\n idna] # Kubernetes's urllib3 can mange to use idna without really depending on it.\n mesos_reqs = [\n pymesos,\n psutil]\n wdl_reqs = []\n \n\n # htcondor is not supported by apple\n # this is tricky to conditionally support in 'all' due\n # to how wheels work, so it is not included in all and\n # must be explicitly installed as an extra\n all_reqs = \\\n aws_reqs + \\\n cwl_reqs + \\\n encryption_reqs + \\\n google_reqs + \\\n kubernetes_reqs + \\\n mesos_reqs\n\n\n setup(\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information 
Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities'],\n license=\"Apache License v2.0\",\n python_requires=\">=3.6\",\n install_requires=core_reqs,\n extras_require={\n 'aws': aws_reqs,\n 'cwl': cwl_reqs,\n 'encryption': encryption_reqs,\n 'google': google_reqs,\n 'htcondor:sys_platform!=\"darwin\"': htcondor_reqs,\n 'kubernetes': kubernetes_reqs,\n 'mesos': mesos_reqs,\n 'wdl': wdl_reqs,\n 'all': all_reqs},\n package_dir={'': 'src'},\n packages=find_packages(where='src',\n # Note that we intentionally include the top-level `test` package for\n # functionality like the @experimental and @integrative decoratorss:\n exclude=['*.test.*']),\n package_data = {\n '': ['*.yml', 'cloud-config'],\n },\n # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so\n # you can't just change them here. Luckily, most of them are pretty unique strings, and thus\n # easy to search for.\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',\n 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',\n 'toil-wdl-runner = toil.wdl.toilwdl:main',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',\n '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})\n\n\ndef importVersion():\n \"\"\"\n Load and return the module object for src/toil/version.py, generating it from the template if\n required.\n \"\"\"\n import imp\n try:\n # Attempt to load the template first. It only exists in a working copy cloned via git.\n import version_template\n except ImportError:\n # If loading the template fails we must be in a unpacked source distribution and\n # src/toil/version.py will already exist.\n pass\n else:\n # Use the template to generate src/toil/version.py\n import os\n import errno\n from tempfile import NamedTemporaryFile\n\n new = version_template.expand_()\n try:\n with open('src/toil/version.py') as f:\n old = f.read()\n except IOError as e:\n if e.errno == errno.ENOENT:\n old = None\n else:\n raise\n\n if old != new:\n with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:\n f.write(new)\n os.rename(f.name, 'src/toil/version.py')\n # Unfortunately, we can't use a straight import here because that would also load the stuff\n # defined in src/toil/__init__.py which imports modules from external dependencies that may\n # yet to be installed when setup.py is invoked.\n return imp.load_source('toil.version', 'src/toil/version.py')\n\n\nversion = importVersion()\nrunSetup()\n", "path": "setup.py" } ]
[ { "content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom setuptools import find_packages, setup\nimport os\n\n\ndef runSetup():\n \"\"\"\n Calls setup(). This function exists so the setup() invocation preceded more internal\n functionality. The `version` module is imported dynamically by importVersion() below.\n \"\"\"\n boto = 'boto==2.48.0'\n boto3 = 'boto3>=1.7.50, <2.0'\n futures = 'futures==3.1.1'\n pycryptodome = 'pycryptodome==3.5.1'\n pymesos = 'pymesos==0.3.15'\n psutil = 'psutil >= 3.0.1, <6'\n pynacl = 'pynacl==1.3.0'\n gcs = 'google-cloud-storage==1.6.0'\n gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'\n apacheLibcloud = 'apache-libcloud==2.2.1'\n cwltool = 'cwltool==3.0.20200324120055'\n galaxyToolUtil = 'galaxy-tool-util'\n htcondor = 'htcondor>=8.6.0'\n kubernetes = 'kubernetes>=10, <11'\n idna = 'idna>=2'\n pytz = 'pytz>=2012'\n dill = 'dill==0.3.1.1'\n six = 'six>=1.10.0'\n future = 'future'\n requests = 'requests>=2, <3'\n docker = 'docker==2.5.1'\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n pathlib2 = 'pathlib2==2.3.2'\n enlighten = 'enlighten>=1.5.2, <2'\n\n core_reqs = [\n dill,\n six,\n future,\n requests,\n docker,\n dateutil,\n psutil,\n addict,\n pathlib2,\n pytz,\n enlighten]\n\n aws_reqs = [\n boto,\n boto3,\n futures,\n pycryptodome]\n cwl_reqs = [\n cwltool,\n galaxyToolUtil]\n encryption_reqs = [\n pynacl]\n google_reqs = [\n gcs_oauth2_boto_plugin, # is this being used??\n apacheLibcloud,\n gcs]\n htcondor_reqs = [\n htcondor]\n kubernetes_reqs = [\n kubernetes,\n idna] # Kubernetes's urllib3 can mange to use idna without really depending on it.\n mesos_reqs = [\n pymesos,\n psutil]\n wdl_reqs = []\n \n\n # htcondor is not supported by apple\n # this is tricky to conditionally support in 'all' due\n # to how wheels work, so it is not included in all and\n # must be explicitly installed as an extra\n all_reqs = \\\n aws_reqs + \\\n cwl_reqs + \\\n encryption_reqs + \\\n google_reqs + \\\n kubernetes_reqs + \\\n mesos_reqs\n\n\n setup(\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information 
Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities'],\n license=\"Apache License v2.0\",\n python_requires=\">=3.6\",\n install_requires=core_reqs,\n extras_require={\n 'aws': aws_reqs,\n 'cwl': cwl_reqs,\n 'encryption': encryption_reqs,\n 'google': google_reqs,\n 'htcondor:sys_platform!=\"darwin\"': htcondor_reqs,\n 'kubernetes': kubernetes_reqs,\n 'mesos': mesos_reqs,\n 'wdl': wdl_reqs,\n 'all': all_reqs},\n package_dir={'': 'src'},\n packages=find_packages(where='src',\n # Note that we intentionally include the top-level `test` package for\n # functionality like the @experimental and @integrative decoratorss:\n exclude=['*.test.*']),\n package_data = {\n '': ['*.yml', 'cloud-config'],\n },\n # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so\n # you can't just change them here. Luckily, most of them are pretty unique strings, and thus\n # easy to search for.\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',\n 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',\n 'toil-wdl-runner = toil.wdl.toilwdl:main',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',\n '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})\n\n\ndef importVersion():\n \"\"\"\n Load and return the module object for src/toil/version.py, generating it from the template if\n required.\n \"\"\"\n import imp\n try:\n # Attempt to load the template first. It only exists in a working copy cloned via git.\n import version_template\n except ImportError:\n # If loading the template fails we must be in a unpacked source distribution and\n # src/toil/version.py will already exist.\n pass\n else:\n # Use the template to generate src/toil/version.py\n import os\n import errno\n from tempfile import NamedTemporaryFile\n\n new = version_template.expand_()\n try:\n with open('src/toil/version.py') as f:\n old = f.read()\n except IOError as e:\n if e.errno == errno.ENOENT:\n old = None\n else:\n raise\n\n if old != new:\n with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:\n f.write(new)\n os.rename(f.name, 'src/toil/version.py')\n # Unfortunately, we can't use a straight import here because that would also load the stuff\n # defined in src/toil/__init__.py which imports modules from external dependencies that may\n # yet to be installed when setup.py is invoked.\n return imp.load_source('toil.version', 'src/toil/version.py')\n\n\nversion = importVersion()\nrunSetup()\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index f6cde1d42b..3d2915cc54 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ def runSetup(): dateutil = 'python-dateutil' addict = 'addict<=2.2.0' pathlib2 = 'pathlib2==2.3.2' - enlighten = 'enlighten>=1.5.1, <2' + enlighten = 'enlighten>=1.5.2, <2' core_reqs = [ dill,
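The fix itself is only a dependency bump (`enlighten>=1.5.1` to `>=1.5.2`), presumably to pick up a release with better terminal cleanup. For context, here is an illustrative (not Toil-internal) sketch of how an enlighten progress bar is typically driven and torn down; the cleanup in `manager.stop()` is what has to restore the terminal state that the issue reports being left broken inside `screen`.

```python
import time
import enlighten

# Illustrative only: generic enlighten usage, not Toil's actual progress code.
manager = enlighten.get_manager()
jobs = manager.counter(total=100, desc='jobs', unit='job')

for _ in range(100):
    time.sleep(0.01)  # stand-in for real work
    jobs.update()

# stop() restores the terminal state (cursor, scroll region); if this cleanup
# misbehaves under `screen`, the terminal needs a manual `reset` afterwards.
manager.stop()
```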
cowrie__cowrie-1030
adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials

Fresh install. I tried to exclude 'password' or 'abc123' from valid passwords for the user root. Now the file looks like:

```
root:x:!root
root:x:!123456
root:x:!password
root:x:*
```

Restarted cowrie, but there is no way to deny login with root/password credentials. Maybe some sort of problem with the new regexp checking?
[ { "content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains authentication code\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport json\nimport re\nfrom collections import OrderedDict\nfrom os import path\nfrom random import randint\n\nfrom twisted.python import log\n\nfrom cowrie.core.config import CONFIG\n\n_USERDB_DEFAULTS = [\n 'root:x:!root',\n 'root:x:!123456',\n 'root:x:!/honeypot/i',\n 'root:x:*',\n 'richard:x:*',\n 'richard:x:fout',\n]\n\n\nclass UserDB(object):\n \"\"\"\n By Walter de Jong <[email protected]>\n \"\"\"\n\n def __init__(self):\n self.userdb = OrderedDict()\n self.load()\n\n def load(self):\n \"\"\"\n load the user db\n \"\"\"\n\n try:\n with open('{}/userdb.txt'.format(CONFIG.get('honeypot', 'etc_path')), 'r') as db:\n userdb = db.readlines()\n except IOError:\n log.msg(\"Could not read etc/userdb.txt, default database activated\")\n userdb = _USERDB_DEFAULTS\n\n for user in userdb:\n if not user.startswith('#'):\n try:\n login = user.split(':')[0].encode('utf8')\n password = user.split(':')[2].strip().encode('utf8')\n except IndexError:\n continue\n else:\n self.adduser(login, password)\n\n def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):\n for credentials, policy in self.userdb.items():\n login, passwd = credentials\n\n if self.match_rule(login, thelogin):\n if self.match_rule(passwd, thepasswd):\n return policy\n\n return False\n\n def match_rule(self, rule, input):\n if type(rule) is bytes:\n return rule in [b'*', input]\n else:\n return bool(rule.search(input))\n\n def re_or_str(self, rule):\n \"\"\"\n Convert a /.../ type rule to a regex, otherwise return the string as-is\n\n @param login: rule\n @type login: bytes\n \"\"\"\n res = re.match(br'/(.+)/(i)?$', rule)\n if res:\n return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)\n\n return rule\n\n def adduser(self, login, passwd):\n \"\"\"\n All arguments are bytes\n\n @param login: user id\n @type login: bytes\n @param passwd: password\n @type passwd: bytes\n \"\"\"\n login = self.re_or_str(login)\n\n if passwd[0] == b'!':\n policy = False\n passwd = passwd[1:]\n else:\n policy = True\n\n passwd = self.re_or_str(passwd)\n self.userdb[(login, passwd)] = policy\n\n\nclass AuthRandom(object):\n \"\"\"\n Alternative class that defines the checklogin() method.\n Users will be authenticated after a random number of attempts.\n \"\"\"\n\n def __init__(self):\n # Default values\n self.mintry, self.maxtry, self.maxcache = 2, 5, 10\n\n # Are there auth_class parameters?\n if CONFIG.has_option('honeypot', 'auth_class_parameters'):\n parameters = CONFIG.get('honeypot', 'auth_class_parameters')\n parlist = parameters.split(',')\n if len(parlist) == 3:\n self.mintry = int(parlist[0])\n self.maxtry = int(parlist[1])\n self.maxcache = int(parlist[2])\n\n if self.maxtry < self.mintry:\n self.maxtry = self.mintry + 1\n log.msg(\"maxtry < mintry, adjusting maxtry to: {}\".format(self.maxtry))\n self.uservar = {}\n self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))\n self.loadvars()\n\n def loadvars(self):\n \"\"\"\n Load user vars from json file\n \"\"\"\n if path.isfile(self.uservar_file):\n with open(self.uservar_file, 'r') as fp:\n try:\n self.uservar = json.load(fp)\n except Exception:\n self.uservar = {}\n\n def savevars(self):\n \"\"\"\n Save the user vars to json file\n \"\"\"\n data = self.uservar\n # Note: this is subject to races between 
cowrie logins\n with open(self.uservar_file, 'w') as fp:\n json.dump(data, fp)\n\n def checklogin(self, thelogin, thepasswd, src_ip):\n \"\"\"\n Every new source IP will have to try a random number of times between\n 'mintry' and 'maxtry' before succeeding to login.\n All username/password combinations must be different.\n The successful login combination is stored with the IP address.\n Successful username/passwords pairs are also cached for 'maxcache' times.\n This is to allow access for returns from different IP addresses.\n Variables are saved in 'uservar.json' in the data directory.\n \"\"\"\n\n auth = False\n userpass = str(thelogin) + ':' + str(thepasswd)\n\n if 'cache' not in self.uservar:\n self.uservar['cache'] = []\n cache = self.uservar['cache']\n\n # Check if it is the first visit from src_ip\n if src_ip not in self.uservar:\n self.uservar[src_ip] = {}\n ipinfo = self.uservar[src_ip]\n ipinfo['try'] = 0\n if userpass in cache:\n log.msg(\"first time for {}, found cached: {}\".format(src_ip, userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n else:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n log.msg(\"first time for {}, need: {}\".format(src_ip, ipinfo['max']))\n else:\n if userpass in cache:\n ipinfo = self.uservar[src_ip]\n log.msg(\"Found cached: {}\".format(userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n\n ipinfo = self.uservar[src_ip]\n\n # Fill in missing variables\n if 'max' not in ipinfo:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n if 'try' not in ipinfo:\n ipinfo['try'] = 0\n if 'tried' not in ipinfo:\n ipinfo['tried'] = []\n\n # Don't count repeated username/password combinations\n if userpass in ipinfo['tried']:\n log.msg('already tried this combination')\n self.savevars()\n return auth\n\n ipinfo['try'] += 1\n attempts = ipinfo['try']\n need = ipinfo['max']\n log.msg(\"login attempt: {}\".format(attempts))\n\n # Check if enough login attempts are tried\n if attempts < need:\n self.uservar[src_ip]['tried'].append(userpass)\n elif attempts == need:\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n cache.append(userpass)\n if len(cache) > self.maxcache:\n cache.pop(0)\n auth = True\n # Returning after successful login\n elif attempts > need:\n if 'user' not in ipinfo or 'pw' not in ipinfo:\n log.msg('return, but username or password not set!!!')\n ipinfo['tried'].append(userpass)\n ipinfo['try'] = 1\n else:\n log.msg(\"login return, expect: [{}/{}]\".format(ipinfo['user'], ipinfo['pw']))\n if thelogin == ipinfo['user'] and str(thepasswd) == ipinfo['pw']:\n auth = True\n self.savevars()\n return auth\n", "path": "src/cowrie/core/auth.py" } ]
[ { "content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains authentication code\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport json\nimport re\nfrom collections import OrderedDict\nfrom os import path\nfrom random import randint\n\nfrom twisted.python import log\n\nfrom cowrie.core.config import CONFIG\n\n_USERDB_DEFAULTS = [\n 'root:x:!root',\n 'root:x:!123456',\n 'root:x:!/honeypot/i',\n 'root:x:*',\n 'richard:x:*',\n 'richard:x:fout',\n]\n\n\nclass UserDB(object):\n \"\"\"\n By Walter de Jong <[email protected]>\n \"\"\"\n\n def __init__(self):\n self.userdb = OrderedDict()\n self.load()\n\n def load(self):\n \"\"\"\n load the user db\n \"\"\"\n\n try:\n with open('{}/userdb.txt'.format(CONFIG.get('honeypot', 'etc_path')), 'r') as db:\n userdb = db.readlines()\n except IOError:\n log.msg(\"Could not read etc/userdb.txt, default database activated\")\n userdb = _USERDB_DEFAULTS\n\n for user in userdb:\n if not user.startswith('#'):\n try:\n login = user.split(':')[0].encode('utf8')\n password = user.split(':')[2].strip().encode('utf8')\n except IndexError:\n continue\n else:\n self.adduser(login, password)\n\n def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):\n for credentials, policy in self.userdb.items():\n login, passwd = credentials\n\n if self.match_rule(login, thelogin):\n if self.match_rule(passwd, thepasswd):\n return policy\n\n return False\n\n def match_rule(self, rule, input):\n if type(rule) is bytes:\n return rule in [b'*', input]\n else:\n return bool(rule.search(input))\n\n def re_or_str(self, rule):\n \"\"\"\n Convert a /.../ type rule to a regex, otherwise return the string as-is\n\n @param login: rule\n @type login: bytes\n \"\"\"\n res = re.match(br'/(.+)/(i)?$', rule)\n if res:\n return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)\n\n return rule\n\n def adduser(self, login, passwd):\n \"\"\"\n All arguments are bytes\n\n @param login: user id\n @type login: bytes\n @param passwd: password\n @type passwd: bytes\n \"\"\"\n login = self.re_or_str(login)\n\n if passwd[0] == ord(\"!\"):\n policy = False\n passwd = passwd[1:]\n else:\n policy = True\n\n passwd = self.re_or_str(passwd)\n self.userdb[(login, passwd)] = policy\n\n\nclass AuthRandom(object):\n \"\"\"\n Alternative class that defines the checklogin() method.\n Users will be authenticated after a random number of attempts.\n \"\"\"\n\n def __init__(self):\n # Default values\n self.mintry, self.maxtry, self.maxcache = 2, 5, 10\n\n # Are there auth_class parameters?\n if CONFIG.has_option('honeypot', 'auth_class_parameters'):\n parameters = CONFIG.get('honeypot', 'auth_class_parameters')\n parlist = parameters.split(',')\n if len(parlist) == 3:\n self.mintry = int(parlist[0])\n self.maxtry = int(parlist[1])\n self.maxcache = int(parlist[2])\n\n if self.maxtry < self.mintry:\n self.maxtry = self.mintry + 1\n log.msg(\"maxtry < mintry, adjusting maxtry to: {}\".format(self.maxtry))\n self.uservar = {}\n self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))\n self.loadvars()\n\n def loadvars(self):\n \"\"\"\n Load user vars from json file\n \"\"\"\n if path.isfile(self.uservar_file):\n with open(self.uservar_file, 'r') as fp:\n try:\n self.uservar = json.load(fp)\n except Exception:\n self.uservar = {}\n\n def savevars(self):\n \"\"\"\n Save the user vars to json file\n \"\"\"\n data = self.uservar\n # Note: this is subject to races between 
cowrie logins\n with open(self.uservar_file, 'w') as fp:\n json.dump(data, fp)\n\n def checklogin(self, thelogin, thepasswd, src_ip):\n \"\"\"\n Every new source IP will have to try a random number of times between\n 'mintry' and 'maxtry' before succeeding to login.\n All username/password combinations must be different.\n The successful login combination is stored with the IP address.\n Successful username/passwords pairs are also cached for 'maxcache' times.\n This is to allow access for returns from different IP addresses.\n Variables are saved in 'uservar.json' in the data directory.\n \"\"\"\n\n auth = False\n userpass = str(thelogin) + ':' + str(thepasswd)\n\n if 'cache' not in self.uservar:\n self.uservar['cache'] = []\n cache = self.uservar['cache']\n\n # Check if it is the first visit from src_ip\n if src_ip not in self.uservar:\n self.uservar[src_ip] = {}\n ipinfo = self.uservar[src_ip]\n ipinfo['try'] = 0\n if userpass in cache:\n log.msg(\"first time for {}, found cached: {}\".format(src_ip, userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n else:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n log.msg(\"first time for {}, need: {}\".format(src_ip, ipinfo['max']))\n else:\n if userpass in cache:\n ipinfo = self.uservar[src_ip]\n log.msg(\"Found cached: {}\".format(userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n\n ipinfo = self.uservar[src_ip]\n\n # Fill in missing variables\n if 'max' not in ipinfo:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n if 'try' not in ipinfo:\n ipinfo['try'] = 0\n if 'tried' not in ipinfo:\n ipinfo['tried'] = []\n\n # Don't count repeated username/password combinations\n if userpass in ipinfo['tried']:\n log.msg('already tried this combination')\n self.savevars()\n return auth\n\n ipinfo['try'] += 1\n attempts = ipinfo['try']\n need = ipinfo['max']\n log.msg(\"login attempt: {}\".format(attempts))\n\n # Check if enough login attempts are tried\n if attempts < need:\n self.uservar[src_ip]['tried'].append(userpass)\n elif attempts == need:\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n cache.append(userpass)\n if len(cache) > self.maxcache:\n cache.pop(0)\n auth = True\n # Returning after successful login\n elif attempts > need:\n if 'user' not in ipinfo or 'pw' not in ipinfo:\n log.msg('return, but username or password not set!!!')\n ipinfo['tried'].append(userpass)\n ipinfo['try'] = 1\n else:\n log.msg(\"login return, expect: [{}/{}]\".format(ipinfo['user'], ipinfo['pw']))\n if thelogin == ipinfo['user'] and str(thepasswd) == ipinfo['pw']:\n auth = True\n self.savevars()\n return auth\n", "path": "src/cowrie/core/auth.py" } ]
diff --git a/src/cowrie/core/auth.py b/src/cowrie/core/auth.py index 1541b6d4cd..7a3f8edde1 100644 --- a/src/cowrie/core/auth.py +++ b/src/cowrie/core/auth.py @@ -98,7 +98,7 @@ def adduser(self, login, passwd): """ login = self.re_or_str(login) - if passwd[0] == b'!': + if passwd[0] == ord("!"): policy = False passwd = passwd[1:] else:
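The one-character diff above is easier to follow with the Python 3 bytes semantics spelled out: indexing a `bytes` object returns an `int`, so the old comparison against `b'!'` could never be true. The leading `!` was therefore kept as part of the password pattern and the deny policy never applied, which is why `root:x:!password` did not block root/password. A quick standalone illustration:

```python
# Python 3: indexing bytes yields an int, not a length-1 bytes object.
passwd = b'!password'

print(passwd[0])               # 33, the code point of '!'
print(passwd[0] == b'!')       # False -- the original check, never matches
print(passwd[0] == ord('!'))   # True  -- the comparison used in the fix
print(passwd[0:1] == b'!')     # True  -- slicing would have worked as well
```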
hydroshare__hydroshare-5098
Haystack rest endpoint response serializer does not include short_id

**Description of the bug**
The Haystack REST endpoint for complex solr searches does not include the short_id in the response serializer. This is a critical piece of information for users of this endpoint.

Steps to reproduce the bug:
https://github.com/hydroshare/hydroshare/blob/d3bd1737a0179eac74cd68926b3b79b80894410e/hs_rest_api/discovery.py#L12

**Expected behavior**
I expect resource ids to be included with search results so I can retrieve resources.
[ { "content": "from drf_haystack.serializers import HaystackSerializer\nfrom drf_haystack.viewsets import HaystackViewSet\nfrom hs_core.search_indexes import BaseResourceIndex\nfrom hs_core.models import BaseResource\nfrom drf_haystack.fields import HaystackCharField, HaystackDateField, HaystackMultiValueField, \\\n HaystackFloatField\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework.decorators import action\nfrom rest_framework import serializers\n\n\nclass DiscoveryResourceSerializer(HaystackSerializer):\n class Meta:\n index_classes = [BaseResourceIndex]\n fields = [\n \"title\",\n \"author\",\n \"contributor\",\n \"subject\",\n \"abstract\",\n \"resource_type\",\n \"content_type\",\n \"coverage_type\",\n \"availability\",\n \"created\",\n \"modified\",\n \"start_date\",\n \"end_date\",\n \"east\",\n \"north\",\n \"eastlimit\",\n \"westlimit\",\n \"northlimit\",\n \"southlimit\"\n ]\n\n\nclass DiscoverResourceValidator(serializers.Serializer):\n text = HaystackCharField(required=False,\n help_text='Search across all Resource Fields')\n author = HaystackCharField(required=False,\n help_text='Search by author')\n contributor = HaystackMultiValueField(required=False,\n help_text='Search by contributor')\n subject = HaystackMultiValueField(required=False,\n help_text='Search within subject keywords')\n abstract = HaystackCharField(required=False,\n help_text='Search within the abstract')\n resource_type = HaystackCharField(required=False,\n help_text='Search by resource type')\n content_type = HaystackMultiValueField(required=False,\n help_text='Search by content type')\n coverage_type = HaystackMultiValueField(required=False,\n help_text='Search by coverage type '\n '(point, box, period)')\n availability = HaystackMultiValueField(required=False,\n help_text='Search by availability '\n '(discoverable, public, published)')\n created = HaystackDateField(required=False,\n help_text='Search by created date')\n modified = HaystackDateField(required=False,\n help_text='Search by modified date')\n start_date = HaystackDateField(required=False,\n help_text='Search by start date')\n end_date = HaystackDateField(required=False,\n help_text='Search by end date')\n east = HaystackFloatField(required=False,\n help_text='Search by location or box center east longitude')\n north = HaystackFloatField(required=False,\n help_text='Search by location or box center north latitude')\n eastlimit = HaystackFloatField(required=False,\n help_text='Search by east limit longitude')\n westlimit = HaystackFloatField(required=False,\n help_text='Search by west limit longitude')\n northlimit = HaystackFloatField(required=False,\n help_text='Search by north limit latitude')\n southlimit = HaystackFloatField(required=False,\n help_text='Search by south limit latitude')\n\n\nclass DiscoverSearchView(HaystackViewSet):\n index_models = [BaseResource]\n serializer_class = DiscoveryResourceSerializer\n\n @action(detail=True, methods=['get'])\n @swagger_auto_schema(operation_description=\"Search HydroShare Resources using solr conventions.\"\n \"We use haystack for queries so you can use all of \"\n \"the parameters described here in combination with \"\n \"field lookups \"\n \"https://django-haystack.readthedocs.io/en/latest/\"\n \"searchqueryset_api.html?highlight=lookups#id1\",\n query_serializer=DiscoverResourceValidator)\n def list(self, request):\n return super(DiscoverSearchView, self).list(request)\n", "path": "hs_rest_api/discovery.py" } ]
[ { "content": "from drf_haystack.serializers import HaystackSerializer\nfrom drf_haystack.viewsets import HaystackViewSet\nfrom hs_core.search_indexes import BaseResourceIndex\nfrom hs_core.models import BaseResource\nfrom drf_haystack.fields import HaystackCharField, HaystackDateField, HaystackMultiValueField, \\\n HaystackFloatField\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework.decorators import action\nfrom rest_framework import serializers\n\n\nclass DiscoveryResourceSerializer(HaystackSerializer):\n class Meta:\n index_classes = [BaseResourceIndex]\n fields = [\n \"short_id\",\n \"title\",\n \"author\",\n \"contributor\",\n \"subject\",\n \"abstract\",\n \"resource_type\",\n \"content_type\",\n \"coverage_type\",\n \"availability\",\n \"created\",\n \"modified\",\n \"start_date\",\n \"end_date\",\n \"east\",\n \"north\",\n \"eastlimit\",\n \"westlimit\",\n \"northlimit\",\n \"southlimit\"\n ]\n\n\nclass DiscoverResourceValidator(serializers.Serializer):\n text = HaystackCharField(required=False,\n help_text='Search across all Resource Fields')\n author = HaystackCharField(required=False,\n help_text='Search by author')\n contributor = HaystackMultiValueField(required=False,\n help_text='Search by contributor')\n subject = HaystackMultiValueField(required=False,\n help_text='Search within subject keywords')\n abstract = HaystackCharField(required=False,\n help_text='Search within the abstract')\n resource_type = HaystackCharField(required=False,\n help_text='Search by resource type')\n content_type = HaystackMultiValueField(required=False,\n help_text='Search by content type')\n coverage_type = HaystackMultiValueField(required=False,\n help_text='Search by coverage type '\n '(point, box, period)')\n availability = HaystackMultiValueField(required=False,\n help_text='Search by availability '\n '(discoverable, public, published)')\n created = HaystackDateField(required=False,\n help_text='Search by created date')\n modified = HaystackDateField(required=False,\n help_text='Search by modified date')\n start_date = HaystackDateField(required=False,\n help_text='Search by start date')\n end_date = HaystackDateField(required=False,\n help_text='Search by end date')\n east = HaystackFloatField(required=False,\n help_text='Search by location or box center east longitude')\n north = HaystackFloatField(required=False,\n help_text='Search by location or box center north latitude')\n eastlimit = HaystackFloatField(required=False,\n help_text='Search by east limit longitude')\n westlimit = HaystackFloatField(required=False,\n help_text='Search by west limit longitude')\n northlimit = HaystackFloatField(required=False,\n help_text='Search by north limit latitude')\n southlimit = HaystackFloatField(required=False,\n help_text='Search by south limit latitude')\n\n\nclass DiscoverSearchView(HaystackViewSet):\n index_models = [BaseResource]\n serializer_class = DiscoveryResourceSerializer\n\n @action(detail=True, methods=['get'])\n @swagger_auto_schema(operation_description=\"Search HydroShare Resources using solr conventions.\"\n \"We use haystack for queries so you can use all of \"\n \"the parameters described here in combination with \"\n \"field lookups \"\n \"https://django-haystack.readthedocs.io/en/latest/\"\n \"searchqueryset_api.html?highlight=lookups#id1\",\n query_serializer=DiscoverResourceValidator)\n def list(self, request):\n return super(DiscoverSearchView, self).list(request)\n", "path": "hs_rest_api/discovery.py" } ]
diff --git a/hs_rest_api/discovery.py b/hs_rest_api/discovery.py index faf613057d..71efe1b41f 100755 --- a/hs_rest_api/discovery.py +++ b/hs_rest_api/discovery.py @@ -13,6 +13,7 @@ class DiscoveryResourceSerializer(HaystackSerializer): class Meta: index_classes = [BaseResourceIndex] fields = [ + "short_id", "title", "author", "contributor",
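With `short_id` added to the serializer's field list, each search hit carries the identifier a client needs to retrieve the resource afterwards. The sketch below is a rough client-side illustration under stated assumptions: the URL is a placeholder for wherever `DiscoverSearchView` is actually routed, and the exact response envelope (plain list vs. paginated dict) depends on the deployment's pagination settings.

```python
import requests

# Placeholder URL -- substitute the actual route for DiscoverSearchView.
SEARCH_URL = "https://www.hydroshare.org/<discover-endpoint>/"

resp = requests.get(SEARCH_URL, params={"subject": "groundwater"})
resp.raise_for_status()
payload = resp.json()

# Depending on pagination settings, hits may be the payload itself or nested
# under a "results" key.
hits = payload.get("results", payload) if isinstance(payload, dict) else payload

for hit in hits:
    # short_id is what the issue asked for: enough to retrieve the resource.
    print(hit.get("short_id"), hit.get("title"))
```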
googleapis__python-bigquery-306
Needs protobuf minimum version 3.12.0

If you are still having issues, please be sure to include as much information as possible:

#### Environment details

- OS type and version: MacOS
- Python version: `python --version` 3.8
- pip version: `pip --version` 20.2.3
- `google-cloud-bigquery` version: `pip show google-cloud-bigquery` 2.0.0

#### Steps to reproduce

1. Just install the package, import somewhere and run it.

#### Code example

```python
from google.cloud import bigquery
```

#### Stack trace

```
File "/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py", line 570, in _GetFieldByName
    return message_descriptor.fields_by_name[field_name]
KeyError: 'proto3_optional'
...
    from google.cloud import bigquery
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/__init__.py", line 35, in <module>
    from google.cloud.bigquery.client import Client
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/client.py", line 57, in <module>
    from google.cloud.bigquery import _pandas_helpers
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/_pandas_helpers.py", line 36, in <module>
    from google.cloud.bigquery import schema
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/schema.py", line 19, in <module>
    from google.cloud.bigquery_v2 import types
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/__init__.py", line 19, in <module>
    from .types.encryption_config import EncryptionConfiguration
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/__init__.py", line 18, in <module>
    from .encryption_config import EncryptionConfiguration
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/encryption_config.py", line 29, in <module>
    class EncryptionConfiguration(proto.Message):
File "/.../venv/lib/python3.8/site-packages/proto/message.py", line 215, in __new__
    field=[i.descriptor for i in fields],
File "/.../venv/lib/python3.8/site-packages/proto/message.py", line 215, in <listcomp>
    field=[i.descriptor for i in fields],
File "/.../venv/lib/python3.8/site-packages/proto/fields.py", line 104, in descriptor
    self._descriptor = descriptor_pb2.FieldDescriptorProto(
File "/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py", line 509, in init
    field = _GetFieldByName(message_descriptor, field_name)
File "/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py", line 572, in _GetFieldByName
    raise ValueError('Protocol message %s has no "%s" field.' %
ValueError: Protocol message FieldDescriptorProto has no "proto3_optional" field.
```
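The `KeyError: 'proto3_optional'` / `ValueError` pair in the trace points at the installed protobuf runtime rather than at google-cloud-bigquery itself: `proto3_optional` was added to `FieldDescriptorProto` in protobuf 3.12.0, and the proto-plus generated types under `bigquery_v2` rely on it. A quick way to check which side of that boundary an environment is on, using only standard protobuf APIs:

```python
import google.protobuf
from google.protobuf import descriptor_pb2

print(google.protobuf.__version__)

# True on protobuf >= 3.12.0; False on the older runtimes that produce the
# KeyError / ValueError shown in the stack trace above.
print('proto3_optional' in
      descriptor_pb2.FieldDescriptorProto.DESCRIPTOR.fields_by_name)

# If it prints False, upgrading the runtime is the workaround:
#   pip install --upgrade "protobuf>=3.12.0"
```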
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.22.2, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.9b0\",\n \"opentelemetry-sdk==0.9b0\",\n \"opentelemetry-instrumentation==0.9b0 \",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.22.2, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.9b0\",\n \"opentelemetry-sdk==0.9b0\",\n \"opentelemetry-instrumentation==0.9b0 \",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index abd5cef95..c7410601e 100644 --- a/setup.py +++ b/setup.py @@ -34,6 +34,7 @@ "google-cloud-core >= 1.4.1, < 2.0dev", "google-resumable-media >= 0.6.0, < 2.0dev", "six >=1.13.0,< 2.0.0dev", + "protobuf >= 3.12.0", ] extras = { "bqstorage": [ diff --git a/testing/constraints-3.6.txt b/testing/constraints-3.6.txt index 798804941..cea0ed84e 100644 --- a/testing/constraints-3.6.txt +++ b/testing/constraints-3.6.txt @@ -8,6 +8,7 @@ libcst==0.2.5 llvmlite==0.34.0 # pandas 0.23.0 is the first version to work with pyarrow to_pandas. pandas==0.23.0 +protobuf == 3.12.0 proto-plus==1.10.0 pyarrow==1.0.0 python-snappy==0.5.4
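The traceback above comes from `proto-plus` building a `FieldDescriptorProto` that sets `proto3_optional`, a descriptor field that only exists in protobuf 3.12.0 and later; the patch simply pins that floor in `setup.py` and in the 3.6 constraints file. The sketch below is illustrative only: the helper name and the use of `packaging.version` are my own additions, not part of `google-cloud-bigquery`.

```python
# Illustrative check that the installed protobuf satisfies the new floor
# pinned in setup.py ("protobuf >= 3.12.0"). Not part of the library itself.
from packaging import version

import google.protobuf


def protobuf_supports_proto3_optional(minimum: str = "3.12.0") -> bool:
    """Return True if the installed protobuf is new enough for proto-plus
    messages that rely on proto3_optional field descriptors."""
    return version.parse(google.protobuf.__version__) >= version.parse(minimum)


if __name__ == "__main__":
    if not protobuf_supports_proto3_optional():
        raise RuntimeError(
            "protobuf %s is too old; install protobuf>=3.12.0 before "
            "importing google.cloud.bigquery" % google.protobuf.__version__
        )
```

In practice the declared `install_requires` pin is the real fix, since pip enforces it at install time; a runtime check like this is only useful for diagnosing environments that were installed before the pin existed.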
xorbitsai__inference-87
BUG: too many clients ### Describe the bug When running the model_ref.generate() function in iPython, there seems to be a client created for every word generation, eventually leading to the following error: `gaierror: [Errno 8] nodename nor servname provided, or not known` ### To Reproduce `python -m plexar.deploy.cmdline supervisor -a localhost:9999 --log-level debug` `python -m plexar.deploy.cmdline worker --supervisor-address localhost:9999 -a localhost:10000 --log-level debug` ``` import sys from plexar.client import Client client = Client("localhost:9999") model_uid = client.launch_model("wizardlm-v1.0",7,"ggmlv3","q4_0") model_ref = client.get_model(model_uid) async for c in await model_ref.generate("Once upon a time, there was a very old computer.", {'max_tokens': 512}): sys.stdout.write(c['choices'][0]['text']) ``` ### Expected behavior First the warnings are printed: `Actor caller has created too many clients ([some number] >= 100), the global router may not be set.` Then we have the gaierror after the `[some number]` exceeds 240.
[ { "content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom . import _version\n\n__version__ = _version.get_versions()[\"version\"]\n\n\ndef install():\n from .model import install as install_model\n\n install_model()\n\n\ninstall()\ndel install\n", "path": "plexar/__init__.py" } ]
[ { "content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom . import _version\n\n__version__ = _version.get_versions()[\"version\"]\n\n\ndef install():\n from xoscar.backends.router import Router\n\n from .model import install as install_model\n\n default_router = Router.get_instance_or_empty()\n Router.set_instance(default_router)\n install_model()\n\n\ninstall()\ndel install\n", "path": "plexar/__init__.py" } ]
diff --git a/plexar/__init__.py b/plexar/__init__.py index 27261e73f0..99ddef38b0 100644 --- a/plexar/__init__.py +++ b/plexar/__init__.py @@ -19,8 +19,12 @@ def install(): + from xoscar.backends.router import Router + from .model import install as install_model + default_router = Router.get_instance_or_empty() + Router.set_instance(default_router) install_model()
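The patch works because xoscar actor callers look up a process-global `Router` to reuse connections; when no global router is registered, each streamed chunk from `model_ref.generate()` appears to open a fresh client until the warning threshold and, eventually, the socket error are hit. A minimal sketch of the same pattern in isolation, using only the two `Router` calls that appear in the diff (`get_instance_or_empty()` and `set_instance()`):

```python
# Minimal sketch of the fix applied in plexar/__init__.py: make sure a
# process-global Router exists so callers share one client per address
# instead of opening a new connection for every call.
from xoscar.backends.router import Router


def ensure_global_router() -> Router:
    # Reuse the already-registered global router if there is one,
    # otherwise register an empty default router.
    router = Router.get_instance_or_empty()
    Router.set_instance(router)
    return router
```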
pyro-ppl__numpyro-1136
SA kernel missing model attribute Trying to load an MCMC run using an SA kernel into `arviz`, I ran into this issue: ``` def test_model(...) ... kernel = SA(test_model) mcmc_test = MCMC(kernel, ...) mcmc_test.run(...) data_test = az.from_numpyro(mcmc_test) ``` which raises... ```--------------------------------------------------------------------------- AttributeError Traceback (most recent call last) /tmp/ipykernel_20194/2118154136.py in <module> ----> 1 data_test = az.from_numpyro(mcmc_test) ~/miniconda3/envs/refit_fvs/lib/python3.9/site-packages/arviz/data/io_numpyro.py in from_numpyro(posterior, prior, posterior_predictive, predictions, constant_data, predictions_constant_data, coords, dims, pred_dims, num_chains) 331 Number of chains used for sampling. Ignored if posterior is present. 332 """ --> 333 return NumPyroConverter( 334 posterior=posterior, 335 prior=prior, ~/miniconda3/envs/refit_fvs/lib/python3.9/site-packages/arviz/data/io_numpyro.py in __init__(self, posterior, prior, posterior_predictive, predictions, constant_data, predictions_constant_data, coords, dims, pred_dims, num_chains) 91 self._samples = samples 92 self.nchains, self.ndraws = posterior.num_chains, posterior.num_samples ---> 93 self.model = self.posterior.sampler.model 94 # model arguments and keyword arguments 95 self._args = self.posterior._args # pylint: disable=protected-access AttributeError: 'SA' object has no attribute 'model' ``` Looking at the source code and noticing that this works for NUTS and HMC, the missing piece in the SA class seems to be the `model` property... going to prepare a PR to add it in the same manner as it's found in NUTS and HMC classes.
[ { "content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import namedtuple\n\nfrom jax import device_put, lax, random, vmap\nfrom jax.flatten_util import ravel_pytree\nimport jax.numpy as jnp\nfrom jax.scipy.special import logsumexp\n\nimport numpyro.distributions as dist\nfrom numpyro.distributions.util import cholesky_update\nfrom numpyro.infer.mcmc import MCMCKernel\nfrom numpyro.infer.util import init_to_uniform, initialize_model\nfrom numpyro.util import identity\n\n\ndef _get_proposal_loc_and_scale(samples, loc, scale, new_sample):\n # get loc/scale of q_{-n} (Algorithm 1, line 5 of ref [1]) for n from 1 -> N\n # these loc/scale will be stacked to the first dim; so\n # proposal_loc.shape[0] = proposal_loc.shape[0] = N\n # Here, we use the numerical stability procedure in Appendix 6 of [1].\n weight = 1 / samples.shape[0]\n if scale.ndim > loc.ndim:\n new_scale = cholesky_update(scale, new_sample - loc, weight)\n proposal_scale = cholesky_update(new_scale, samples - loc, -weight)\n proposal_scale = cholesky_update(\n proposal_scale, new_sample - samples, -(weight ** 2)\n )\n else:\n var = jnp.square(scale) + weight * jnp.square(new_sample - loc)\n proposal_var = var - weight * jnp.square(samples - loc)\n proposal_var = proposal_var - weight ** 2 * jnp.square(new_sample - samples)\n proposal_scale = jnp.sqrt(proposal_var)\n\n proposal_loc = loc + weight * (new_sample - samples)\n return proposal_loc, proposal_scale\n\n\ndef _sample_proposal(inv_mass_matrix_sqrt, rng_key, batch_shape=()):\n eps = random.normal(rng_key, batch_shape + jnp.shape(inv_mass_matrix_sqrt)[:1])\n if inv_mass_matrix_sqrt.ndim == 1:\n r = jnp.multiply(inv_mass_matrix_sqrt, eps)\n elif inv_mass_matrix_sqrt.ndim == 2:\n r = jnp.matmul(inv_mass_matrix_sqrt, eps[..., None])[..., 0]\n else:\n raise ValueError(\"Mass matrix has incorrect number of dims.\")\n return r\n\n\nSAAdaptState = namedtuple(\"SAAdaptState\", [\"zs\", \"pes\", \"loc\", \"inv_mass_matrix_sqrt\"])\nSAState = namedtuple(\n \"SAState\",\n [\n \"i\",\n \"z\",\n \"potential_energy\",\n \"accept_prob\",\n \"mean_accept_prob\",\n \"diverging\",\n \"adapt_state\",\n \"rng_key\",\n ],\n)\n\"\"\"\nA :func:`~collections.namedtuple` used in Sample Adaptive MCMC.\nThis consists of the following fields:\n\n - **i** - iteration. This is reset to 0 after warmup.\n - **z** - Python collection representing values (unconstrained samples from\n the posterior) at latent sites.\n - **potential_energy** - Potential energy computed at the given value of ``z``.\n - **accept_prob** - Acceptance probability of the proposal. Note that ``z``\n does not correspond to the proposal if it is rejected.\n - **mean_accept_prob** - Mean acceptance probability until current iteration\n during warmup or sampling (for diagnostics).\n - **diverging** - A boolean value to indicate whether the new sample potential energy\n is diverging from the current one.\n - **adapt_state** - A ``SAAdaptState`` namedtuple which contains adaptation information:\n\n + **zs** - Step size to be used by the integrator in the next iteration.\n + **pes** - Potential energies of `zs`.\n + **loc** - Mean of those `zs`.\n + **inv_mass_matrix_sqrt** - If using dense mass matrix, this is Cholesky of the\n covariance of `zs`. 
Otherwise, this is standard deviation of those `zs`.\n\n - **rng_key** - random number generator seed used for the iteration.\n\"\"\"\n\n\ndef _numpy_delete(x, idx):\n \"\"\"\n Gets the subarray from `x` where data from index `idx` on the first axis is removed.\n \"\"\"\n # NB: numpy.delete is not yet available in JAX\n mask = jnp.arange(x.shape[0] - 1) < idx\n return jnp.where(mask.reshape((-1,) + (1,) * (x.ndim - 1)), x[:-1], x[1:])\n\n\n# TODO: consider to expose this functional style\ndef _sa(potential_fn=None, potential_fn_gen=None):\n wa_steps = None\n max_delta_energy = 1000.0\n\n def init_kernel(\n init_params,\n num_warmup,\n adapt_state_size=None,\n inverse_mass_matrix=None,\n dense_mass=False,\n model_args=(),\n model_kwargs=None,\n rng_key=random.PRNGKey(0),\n ):\n nonlocal wa_steps\n wa_steps = num_warmup\n pe_fn = potential_fn\n if potential_fn_gen:\n if pe_fn is not None:\n raise ValueError(\n \"Only one of `potential_fn` or `potential_fn_gen` must be provided.\"\n )\n else:\n kwargs = {} if model_kwargs is None else model_kwargs\n pe_fn = potential_fn_gen(*model_args, **kwargs)\n rng_key_sa, rng_key_zs, rng_key_z = random.split(rng_key, 3)\n z = init_params\n z_flat, unravel_fn = ravel_pytree(z)\n if inverse_mass_matrix is None:\n inverse_mass_matrix = (\n jnp.identity(z_flat.shape[-1])\n if dense_mass\n else jnp.ones(z_flat.shape[-1])\n )\n inv_mass_matrix_sqrt = (\n jnp.linalg.cholesky(inverse_mass_matrix)\n if dense_mass\n else jnp.sqrt(inverse_mass_matrix)\n )\n if adapt_state_size is None:\n # XXX: heuristic choice\n adapt_state_size = 2 * z_flat.shape[-1]\n else:\n assert adapt_state_size > 1, \"adapt_state_size should be greater than 1.\"\n # NB: mean is init_params\n zs = z_flat + _sample_proposal(\n inv_mass_matrix_sqrt, rng_key_zs, (adapt_state_size,)\n )\n # compute potential energies\n pes = lax.map(lambda z: pe_fn(unravel_fn(z)), zs)\n if dense_mass:\n cov = jnp.cov(zs, rowvar=False, bias=True)\n if cov.shape == (): # JAX returns scalar for 1D input\n cov = cov.reshape((1, 1))\n cholesky = jnp.linalg.cholesky(cov)\n # if cholesky is NaN, we use the scale from `sample_proposal` here\n inv_mass_matrix_sqrt = jnp.where(\n jnp.any(jnp.isnan(cholesky)), inv_mass_matrix_sqrt, cholesky\n )\n else:\n inv_mass_matrix_sqrt = jnp.std(zs, 0)\n adapt_state = SAAdaptState(zs, pes, jnp.mean(zs, 0), inv_mass_matrix_sqrt)\n k = random.categorical(rng_key_z, jnp.zeros(zs.shape[0]))\n z = unravel_fn(zs[k])\n pe = pes[k]\n sa_state = SAState(\n jnp.array(0),\n z,\n pe,\n jnp.zeros(()),\n jnp.zeros(()),\n jnp.array(False),\n adapt_state,\n rng_key_sa,\n )\n return device_put(sa_state)\n\n def sample_kernel(sa_state, model_args=(), model_kwargs=None):\n pe_fn = potential_fn\n if potential_fn_gen:\n pe_fn = potential_fn_gen(*model_args, **model_kwargs)\n zs, pes, loc, scale = sa_state.adapt_state\n # we recompute loc/scale after each iteration to avoid precision loss\n # XXX: consider to expose a setting to do this job periodically\n # to save some computations\n loc = jnp.mean(zs, 0)\n if scale.ndim == 2:\n cov = jnp.cov(zs, rowvar=False, bias=True)\n if cov.shape == (): # JAX returns scalar for 1D input\n cov = cov.reshape((1, 1))\n cholesky = jnp.linalg.cholesky(cov)\n scale = jnp.where(jnp.any(jnp.isnan(cholesky)), scale, cholesky)\n else:\n scale = jnp.std(zs, 0)\n\n rng_key, rng_key_z, rng_key_reject, rng_key_accept = random.split(\n sa_state.rng_key, 4\n )\n _, unravel_fn = ravel_pytree(sa_state.z)\n\n z = loc + _sample_proposal(scale, rng_key_z)\n pe = pe_fn(unravel_fn(z))\n 
pe = jnp.where(jnp.isnan(pe), jnp.inf, pe)\n diverging = (pe - sa_state.potential_energy) > max_delta_energy\n\n # NB: all terms having the pattern *s will have shape N x ...\n # and all terms having the pattern *s_ will have shape (N + 1) x ...\n locs, scales = _get_proposal_loc_and_scale(zs, loc, scale, z)\n zs_ = jnp.concatenate([zs, z[None, :]])\n pes_ = jnp.concatenate([pes, pe[None]])\n locs_ = jnp.concatenate([locs, loc[None, :]])\n scales_ = jnp.concatenate([scales, scale[None, ...]])\n if scale.ndim == 2: # dense_mass\n log_weights_ = (\n dist.MultivariateNormal(locs_, scale_tril=scales_).log_prob(zs_) + pes_\n )\n else:\n log_weights_ = dist.Normal(locs_, scales_).log_prob(zs_).sum(-1) + pes_\n # mask invalid values (nan, +inf) by -inf\n log_weights_ = jnp.where(jnp.isfinite(log_weights_), log_weights_, -jnp.inf)\n # get rejecting index\n j = random.categorical(rng_key_reject, log_weights_)\n zs = _numpy_delete(zs_, j)\n pes = _numpy_delete(pes_, j)\n loc = locs_[j]\n scale = scales_[j]\n adapt_state = SAAdaptState(zs, pes, loc, scale)\n\n # NB: weights[-1] / sum(weights) is the probability of rejecting the new sample `z`.\n accept_prob = 1 - jnp.exp(log_weights_[-1] - logsumexp(log_weights_))\n itr = sa_state.i + 1\n n = jnp.where(sa_state.i < wa_steps, itr, itr - wa_steps)\n mean_accept_prob = (\n sa_state.mean_accept_prob + (accept_prob - sa_state.mean_accept_prob) / n\n )\n\n # XXX: we make a modification of SA sampler in [1]\n # in [1], each MCMC state contains N points `zs`\n # here we do resampling to pick randomly a point from those N points\n k = random.categorical(rng_key_accept, jnp.zeros(zs.shape[0]))\n z = unravel_fn(zs[k])\n pe = pes[k]\n return SAState(\n itr, z, pe, accept_prob, mean_accept_prob, diverging, adapt_state, rng_key\n )\n\n return init_kernel, sample_kernel\n\n\n# TODO: this shares almost the same code as HMC, so we can abstract out much of the implementation\nclass SA(MCMCKernel):\n \"\"\"\n Sample Adaptive MCMC, a gradient-free sampler.\n\n This is a very fast (in term of n_eff / s) sampler but requires\n many warmup (burn-in) steps. In each MCMC step, we only need to\n evaluate potential function at one point.\n\n Note that unlike in reference [1], we return a randomly selected (i.e. thinned)\n subset of approximate posterior samples of size num_chains x num_samples\n instead of num_chains x num_samples x adapt_state_size.\n\n .. note:: We recommend to use this kernel with `progress_bar=False` in\n :class:`~numpyro.infer.mcmc.MCMC` to reduce JAX's dispatch overhead.\n\n **References:**\n\n 1. *Sample Adaptive MCMC* (https://papers.nips.cc/paper/9107-sample-adaptive-mcmc),\n Michael Zhu\n\n :param model: Python callable containing Pyro :mod:`~numpyro.primitives`.\n If model is provided, `potential_fn` will be inferred using the model.\n :param potential_fn: Python callable that computes the potential energy\n given input parameters. The input parameters to `potential_fn` can be\n any python collection type, provided that `init_params` argument to\n :meth:`init` has the same type.\n :param int adapt_state_size: The number of points to generate proposal\n distribution. 
Defaults to 2 times latent size.\n :param bool dense_mass: A flag to decide if mass matrix is dense or\n diagonal (default to ``dense_mass=True``)\n :param callable init_strategy: a per-site initialization function.\n See :ref:`init_strategy` section for available functions.\n \"\"\"\n\n def __init__(\n self,\n model=None,\n potential_fn=None,\n adapt_state_size=None,\n dense_mass=True,\n init_strategy=init_to_uniform,\n ):\n if not (model is None) ^ (potential_fn is None):\n raise ValueError(\"Only one of `model` or `potential_fn` must be specified.\")\n self._model = model\n self._potential_fn = potential_fn\n self._adapt_state_size = adapt_state_size\n self._dense_mass = dense_mass\n self._init_strategy = init_strategy\n self._init_fn = None\n self._potential_fn_gen = None\n self._postprocess_fn = None\n self._sample_fn = None\n\n def _init_state(self, rng_key, model_args, model_kwargs, init_params):\n if self._model is not None:\n init_params, potential_fn, postprocess_fn, _ = initialize_model(\n rng_key,\n self._model,\n dynamic_args=True,\n init_strategy=self._init_strategy,\n model_args=model_args,\n model_kwargs=model_kwargs,\n validate_grad=False,\n )\n init_params = init_params[0]\n # NB: init args is different from HMC\n self._init_fn, sample_fn = _sa(potential_fn_gen=potential_fn)\n self._potential_fn_gen = potential_fn\n if self._postprocess_fn is None:\n self._postprocess_fn = postprocess_fn\n else:\n self._init_fn, sample_fn = _sa(potential_fn=self._potential_fn)\n\n if self._sample_fn is None:\n self._sample_fn = sample_fn\n return init_params\n\n def init(\n self, rng_key, num_warmup, init_params=None, model_args=(), model_kwargs={}\n ):\n # non-vectorized\n if rng_key.ndim == 1:\n rng_key, rng_key_init_model = random.split(rng_key)\n # vectorized\n else:\n rng_key, rng_key_init_model = jnp.swapaxes(\n vmap(random.split)(rng_key), 0, 1\n )\n # we need only a single key for initializing PE / constraints fn\n rng_key_init_model = rng_key_init_model[0]\n init_params = self._init_state(\n rng_key_init_model, model_args, model_kwargs, init_params\n )\n if self._potential_fn and init_params is None:\n raise ValueError(\n \"Valid value of `init_params` must be provided with\" \" `potential_fn`.\"\n )\n\n # NB: init args is different from HMC\n sa_init_fn = lambda init_params, rng_key: self._init_fn( # noqa: E731\n init_params,\n num_warmup=num_warmup,\n adapt_state_size=self._adapt_state_size,\n dense_mass=self._dense_mass,\n rng_key=rng_key,\n model_args=model_args,\n model_kwargs=model_kwargs,\n )\n if rng_key.ndim == 1:\n init_state = sa_init_fn(init_params, rng_key)\n else:\n init_state = vmap(sa_init_fn)(init_params, rng_key)\n sample_fn = vmap(self._sample_fn, in_axes=(0, None, None))\n self._sample_fn = sample_fn\n return init_state\n\n @property\n def sample_field(self):\n return \"z\"\n\n @property\n def default_fields(self):\n return (\"z\", \"diverging\")\n\n def get_diagnostics_str(self, state):\n return \"acc. 
prob={:.2f}\".format(state.mean_accept_prob)\n\n def postprocess_fn(self, args, kwargs):\n if self._postprocess_fn is None:\n return identity\n return self._postprocess_fn(*args, **kwargs)\n\n def sample(self, state, model_args, model_kwargs):\n \"\"\"\n Run SA from the given :data:`~numpyro.infer.sa.SAState` and return the resulting\n :data:`~numpyro.infer.sa.SAState`.\n\n :param SAState state: Represents the current state.\n :param model_args: Arguments provided to the model.\n :param model_kwargs: Keyword arguments provided to the model.\n :return: Next `state` after running SA.\n \"\"\"\n return self._sample_fn(state, model_args, model_kwargs)\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"_sample_fn\"] = None\n state[\"_init_fn\"] = None\n state[\"_postprocess_fn\"] = None\n state[\"_potential_fn_gen\"] = None\n return state\n", "path": "numpyro/infer/sa.py" } ]
[ { "content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import namedtuple\n\nfrom jax import device_put, lax, random, vmap\nfrom jax.flatten_util import ravel_pytree\nimport jax.numpy as jnp\nfrom jax.scipy.special import logsumexp\n\nimport numpyro.distributions as dist\nfrom numpyro.distributions.util import cholesky_update\nfrom numpyro.infer.mcmc import MCMCKernel\nfrom numpyro.infer.util import init_to_uniform, initialize_model\nfrom numpyro.util import identity\n\n\ndef _get_proposal_loc_and_scale(samples, loc, scale, new_sample):\n # get loc/scale of q_{-n} (Algorithm 1, line 5 of ref [1]) for n from 1 -> N\n # these loc/scale will be stacked to the first dim; so\n # proposal_loc.shape[0] = proposal_loc.shape[0] = N\n # Here, we use the numerical stability procedure in Appendix 6 of [1].\n weight = 1 / samples.shape[0]\n if scale.ndim > loc.ndim:\n new_scale = cholesky_update(scale, new_sample - loc, weight)\n proposal_scale = cholesky_update(new_scale, samples - loc, -weight)\n proposal_scale = cholesky_update(\n proposal_scale, new_sample - samples, -(weight ** 2)\n )\n else:\n var = jnp.square(scale) + weight * jnp.square(new_sample - loc)\n proposal_var = var - weight * jnp.square(samples - loc)\n proposal_var = proposal_var - weight ** 2 * jnp.square(new_sample - samples)\n proposal_scale = jnp.sqrt(proposal_var)\n\n proposal_loc = loc + weight * (new_sample - samples)\n return proposal_loc, proposal_scale\n\n\ndef _sample_proposal(inv_mass_matrix_sqrt, rng_key, batch_shape=()):\n eps = random.normal(rng_key, batch_shape + jnp.shape(inv_mass_matrix_sqrt)[:1])\n if inv_mass_matrix_sqrt.ndim == 1:\n r = jnp.multiply(inv_mass_matrix_sqrt, eps)\n elif inv_mass_matrix_sqrt.ndim == 2:\n r = jnp.matmul(inv_mass_matrix_sqrt, eps[..., None])[..., 0]\n else:\n raise ValueError(\"Mass matrix has incorrect number of dims.\")\n return r\n\n\nSAAdaptState = namedtuple(\"SAAdaptState\", [\"zs\", \"pes\", \"loc\", \"inv_mass_matrix_sqrt\"])\nSAState = namedtuple(\n \"SAState\",\n [\n \"i\",\n \"z\",\n \"potential_energy\",\n \"accept_prob\",\n \"mean_accept_prob\",\n \"diverging\",\n \"adapt_state\",\n \"rng_key\",\n ],\n)\n\"\"\"\nA :func:`~collections.namedtuple` used in Sample Adaptive MCMC.\nThis consists of the following fields:\n\n - **i** - iteration. This is reset to 0 after warmup.\n - **z** - Python collection representing values (unconstrained samples from\n the posterior) at latent sites.\n - **potential_energy** - Potential energy computed at the given value of ``z``.\n - **accept_prob** - Acceptance probability of the proposal. Note that ``z``\n does not correspond to the proposal if it is rejected.\n - **mean_accept_prob** - Mean acceptance probability until current iteration\n during warmup or sampling (for diagnostics).\n - **diverging** - A boolean value to indicate whether the new sample potential energy\n is diverging from the current one.\n - **adapt_state** - A ``SAAdaptState`` namedtuple which contains adaptation information:\n\n + **zs** - Step size to be used by the integrator in the next iteration.\n + **pes** - Potential energies of `zs`.\n + **loc** - Mean of those `zs`.\n + **inv_mass_matrix_sqrt** - If using dense mass matrix, this is Cholesky of the\n covariance of `zs`. 
Otherwise, this is standard deviation of those `zs`.\n\n - **rng_key** - random number generator seed used for the iteration.\n\"\"\"\n\n\ndef _numpy_delete(x, idx):\n \"\"\"\n Gets the subarray from `x` where data from index `idx` on the first axis is removed.\n \"\"\"\n # NB: numpy.delete is not yet available in JAX\n mask = jnp.arange(x.shape[0] - 1) < idx\n return jnp.where(mask.reshape((-1,) + (1,) * (x.ndim - 1)), x[:-1], x[1:])\n\n\n# TODO: consider to expose this functional style\ndef _sa(potential_fn=None, potential_fn_gen=None):\n wa_steps = None\n max_delta_energy = 1000.0\n\n def init_kernel(\n init_params,\n num_warmup,\n adapt_state_size=None,\n inverse_mass_matrix=None,\n dense_mass=False,\n model_args=(),\n model_kwargs=None,\n rng_key=random.PRNGKey(0),\n ):\n nonlocal wa_steps\n wa_steps = num_warmup\n pe_fn = potential_fn\n if potential_fn_gen:\n if pe_fn is not None:\n raise ValueError(\n \"Only one of `potential_fn` or `potential_fn_gen` must be provided.\"\n )\n else:\n kwargs = {} if model_kwargs is None else model_kwargs\n pe_fn = potential_fn_gen(*model_args, **kwargs)\n rng_key_sa, rng_key_zs, rng_key_z = random.split(rng_key, 3)\n z = init_params\n z_flat, unravel_fn = ravel_pytree(z)\n if inverse_mass_matrix is None:\n inverse_mass_matrix = (\n jnp.identity(z_flat.shape[-1])\n if dense_mass\n else jnp.ones(z_flat.shape[-1])\n )\n inv_mass_matrix_sqrt = (\n jnp.linalg.cholesky(inverse_mass_matrix)\n if dense_mass\n else jnp.sqrt(inverse_mass_matrix)\n )\n if adapt_state_size is None:\n # XXX: heuristic choice\n adapt_state_size = 2 * z_flat.shape[-1]\n else:\n assert adapt_state_size > 1, \"adapt_state_size should be greater than 1.\"\n # NB: mean is init_params\n zs = z_flat + _sample_proposal(\n inv_mass_matrix_sqrt, rng_key_zs, (adapt_state_size,)\n )\n # compute potential energies\n pes = lax.map(lambda z: pe_fn(unravel_fn(z)), zs)\n if dense_mass:\n cov = jnp.cov(zs, rowvar=False, bias=True)\n if cov.shape == (): # JAX returns scalar for 1D input\n cov = cov.reshape((1, 1))\n cholesky = jnp.linalg.cholesky(cov)\n # if cholesky is NaN, we use the scale from `sample_proposal` here\n inv_mass_matrix_sqrt = jnp.where(\n jnp.any(jnp.isnan(cholesky)), inv_mass_matrix_sqrt, cholesky\n )\n else:\n inv_mass_matrix_sqrt = jnp.std(zs, 0)\n adapt_state = SAAdaptState(zs, pes, jnp.mean(zs, 0), inv_mass_matrix_sqrt)\n k = random.categorical(rng_key_z, jnp.zeros(zs.shape[0]))\n z = unravel_fn(zs[k])\n pe = pes[k]\n sa_state = SAState(\n jnp.array(0),\n z,\n pe,\n jnp.zeros(()),\n jnp.zeros(()),\n jnp.array(False),\n adapt_state,\n rng_key_sa,\n )\n return device_put(sa_state)\n\n def sample_kernel(sa_state, model_args=(), model_kwargs=None):\n pe_fn = potential_fn\n if potential_fn_gen:\n pe_fn = potential_fn_gen(*model_args, **model_kwargs)\n zs, pes, loc, scale = sa_state.adapt_state\n # we recompute loc/scale after each iteration to avoid precision loss\n # XXX: consider to expose a setting to do this job periodically\n # to save some computations\n loc = jnp.mean(zs, 0)\n if scale.ndim == 2:\n cov = jnp.cov(zs, rowvar=False, bias=True)\n if cov.shape == (): # JAX returns scalar for 1D input\n cov = cov.reshape((1, 1))\n cholesky = jnp.linalg.cholesky(cov)\n scale = jnp.where(jnp.any(jnp.isnan(cholesky)), scale, cholesky)\n else:\n scale = jnp.std(zs, 0)\n\n rng_key, rng_key_z, rng_key_reject, rng_key_accept = random.split(\n sa_state.rng_key, 4\n )\n _, unravel_fn = ravel_pytree(sa_state.z)\n\n z = loc + _sample_proposal(scale, rng_key_z)\n pe = pe_fn(unravel_fn(z))\n 
pe = jnp.where(jnp.isnan(pe), jnp.inf, pe)\n diverging = (pe - sa_state.potential_energy) > max_delta_energy\n\n # NB: all terms having the pattern *s will have shape N x ...\n # and all terms having the pattern *s_ will have shape (N + 1) x ...\n locs, scales = _get_proposal_loc_and_scale(zs, loc, scale, z)\n zs_ = jnp.concatenate([zs, z[None, :]])\n pes_ = jnp.concatenate([pes, pe[None]])\n locs_ = jnp.concatenate([locs, loc[None, :]])\n scales_ = jnp.concatenate([scales, scale[None, ...]])\n if scale.ndim == 2: # dense_mass\n log_weights_ = (\n dist.MultivariateNormal(locs_, scale_tril=scales_).log_prob(zs_) + pes_\n )\n else:\n log_weights_ = dist.Normal(locs_, scales_).log_prob(zs_).sum(-1) + pes_\n # mask invalid values (nan, +inf) by -inf\n log_weights_ = jnp.where(jnp.isfinite(log_weights_), log_weights_, -jnp.inf)\n # get rejecting index\n j = random.categorical(rng_key_reject, log_weights_)\n zs = _numpy_delete(zs_, j)\n pes = _numpy_delete(pes_, j)\n loc = locs_[j]\n scale = scales_[j]\n adapt_state = SAAdaptState(zs, pes, loc, scale)\n\n # NB: weights[-1] / sum(weights) is the probability of rejecting the new sample `z`.\n accept_prob = 1 - jnp.exp(log_weights_[-1] - logsumexp(log_weights_))\n itr = sa_state.i + 1\n n = jnp.where(sa_state.i < wa_steps, itr, itr - wa_steps)\n mean_accept_prob = (\n sa_state.mean_accept_prob + (accept_prob - sa_state.mean_accept_prob) / n\n )\n\n # XXX: we make a modification of SA sampler in [1]\n # in [1], each MCMC state contains N points `zs`\n # here we do resampling to pick randomly a point from those N points\n k = random.categorical(rng_key_accept, jnp.zeros(zs.shape[0]))\n z = unravel_fn(zs[k])\n pe = pes[k]\n return SAState(\n itr, z, pe, accept_prob, mean_accept_prob, diverging, adapt_state, rng_key\n )\n\n return init_kernel, sample_kernel\n\n\n# TODO: this shares almost the same code as HMC, so we can abstract out much of the implementation\nclass SA(MCMCKernel):\n \"\"\"\n Sample Adaptive MCMC, a gradient-free sampler.\n\n This is a very fast (in term of n_eff / s) sampler but requires\n many warmup (burn-in) steps. In each MCMC step, we only need to\n evaluate potential function at one point.\n\n Note that unlike in reference [1], we return a randomly selected (i.e. thinned)\n subset of approximate posterior samples of size num_chains x num_samples\n instead of num_chains x num_samples x adapt_state_size.\n\n .. note:: We recommend to use this kernel with `progress_bar=False` in\n :class:`~numpyro.infer.mcmc.MCMC` to reduce JAX's dispatch overhead.\n\n **References:**\n\n 1. *Sample Adaptive MCMC* (https://papers.nips.cc/paper/9107-sample-adaptive-mcmc),\n Michael Zhu\n\n :param model: Python callable containing Pyro :mod:`~numpyro.primitives`.\n If model is provided, `potential_fn` will be inferred using the model.\n :param potential_fn: Python callable that computes the potential energy\n given input parameters. The input parameters to `potential_fn` can be\n any python collection type, provided that `init_params` argument to\n :meth:`init` has the same type.\n :param int adapt_state_size: The number of points to generate proposal\n distribution. 
Defaults to 2 times latent size.\n :param bool dense_mass: A flag to decide if mass matrix is dense or\n diagonal (default to ``dense_mass=True``)\n :param callable init_strategy: a per-site initialization function.\n See :ref:`init_strategy` section for available functions.\n \"\"\"\n\n def __init__(\n self,\n model=None,\n potential_fn=None,\n adapt_state_size=None,\n dense_mass=True,\n init_strategy=init_to_uniform,\n ):\n if not (model is None) ^ (potential_fn is None):\n raise ValueError(\"Only one of `model` or `potential_fn` must be specified.\")\n self._model = model\n self._potential_fn = potential_fn\n self._adapt_state_size = adapt_state_size\n self._dense_mass = dense_mass\n self._init_strategy = init_strategy\n self._init_fn = None\n self._potential_fn_gen = None\n self._postprocess_fn = None\n self._sample_fn = None\n\n def _init_state(self, rng_key, model_args, model_kwargs, init_params):\n if self._model is not None:\n init_params, potential_fn, postprocess_fn, _ = initialize_model(\n rng_key,\n self._model,\n dynamic_args=True,\n init_strategy=self._init_strategy,\n model_args=model_args,\n model_kwargs=model_kwargs,\n validate_grad=False,\n )\n init_params = init_params[0]\n # NB: init args is different from HMC\n self._init_fn, sample_fn = _sa(potential_fn_gen=potential_fn)\n self._potential_fn_gen = potential_fn\n if self._postprocess_fn is None:\n self._postprocess_fn = postprocess_fn\n else:\n self._init_fn, sample_fn = _sa(potential_fn=self._potential_fn)\n\n if self._sample_fn is None:\n self._sample_fn = sample_fn\n return init_params\n\n def init(\n self, rng_key, num_warmup, init_params=None, model_args=(), model_kwargs={}\n ):\n # non-vectorized\n if rng_key.ndim == 1:\n rng_key, rng_key_init_model = random.split(rng_key)\n # vectorized\n else:\n rng_key, rng_key_init_model = jnp.swapaxes(\n vmap(random.split)(rng_key), 0, 1\n )\n # we need only a single key for initializing PE / constraints fn\n rng_key_init_model = rng_key_init_model[0]\n init_params = self._init_state(\n rng_key_init_model, model_args, model_kwargs, init_params\n )\n if self._potential_fn and init_params is None:\n raise ValueError(\n \"Valid value of `init_params` must be provided with\" \" `potential_fn`.\"\n )\n\n # NB: init args is different from HMC\n sa_init_fn = lambda init_params, rng_key: self._init_fn( # noqa: E731\n init_params,\n num_warmup=num_warmup,\n adapt_state_size=self._adapt_state_size,\n dense_mass=self._dense_mass,\n rng_key=rng_key,\n model_args=model_args,\n model_kwargs=model_kwargs,\n )\n if rng_key.ndim == 1:\n init_state = sa_init_fn(init_params, rng_key)\n else:\n init_state = vmap(sa_init_fn)(init_params, rng_key)\n sample_fn = vmap(self._sample_fn, in_axes=(0, None, None))\n self._sample_fn = sample_fn\n return init_state\n\n @property\n def model(self):\n return self._model\n\n @property\n def sample_field(self):\n return \"z\"\n\n @property\n def default_fields(self):\n return (\"z\", \"diverging\")\n\n def get_diagnostics_str(self, state):\n return \"acc. 
prob={:.2f}\".format(state.mean_accept_prob)\n\n def postprocess_fn(self, args, kwargs):\n if self._postprocess_fn is None:\n return identity\n return self._postprocess_fn(*args, **kwargs)\n\n def sample(self, state, model_args, model_kwargs):\n \"\"\"\n Run SA from the given :data:`~numpyro.infer.sa.SAState` and return the resulting\n :data:`~numpyro.infer.sa.SAState`.\n\n :param SAState state: Represents the current state.\n :param model_args: Arguments provided to the model.\n :param model_kwargs: Keyword arguments provided to the model.\n :return: Next `state` after running SA.\n \"\"\"\n return self._sample_fn(state, model_args, model_kwargs)\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"_sample_fn\"] = None\n state[\"_init_fn\"] = None\n state[\"_postprocess_fn\"] = None\n state[\"_potential_fn_gen\"] = None\n return state\n", "path": "numpyro/infer/sa.py" } ]
diff --git a/numpyro/infer/sa.py b/numpyro/infer/sa.py index 999aa4b50..15fd03f1f 100644 --- a/numpyro/infer/sa.py +++ b/numpyro/infer/sa.py @@ -365,6 +365,10 @@ def init( self._sample_fn = sample_fn return init_state + @property + def model(self): + return self._model + @property def sample_field(self): return "z"
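The fix mirrors NUTS/HMC: arviz's `NumPyroConverter` reads `posterior.sampler.model`, so any kernel constructed from a model needs to expose it, and the patch adds a read-only `model` property returning `self._model`. A small sketch of the behaviour after the patch; `my_model` is a placeholder model, and the final assertion only holds once the property exists:

```python
# After the patch, an SA kernel exposes the model it was built from,
# which is what arviz's from_numpyro() relies on.
import numpyro
import numpyro.distributions as dist
from numpyro.infer import SA


def my_model():
    numpyro.sample("x", dist.Normal(0.0, 1.0))


kernel = SA(my_model)
assert kernel.model is my_model  # AttributeError before the patch
```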
iterative__dvc-5752
exp show: table misaligned for queued experiments # Bug Report ## Description `dvc exp show` misaligns columns for queued experiments. ### Reproduce ```console $ git clone [email protected]:iterative/example-get-started.git $ cd example-get-started $ pip install -r src/requirements.txt $ dvc pull data/data.xml.dvc $ dvc exp run -S train.n_est=200 --queue $ dvc exp show --no-pager ┏━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━┓ ┃ Experiment ┃ Created ┃ avg_prec ┃ roc_auc ┃ prepare.split ┃ prepare.seed ┃ … ┃ ┡━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━┩ │ workspace │ - │ 0.60405 │ 0.9608 │ 0.2 │ 20170428 │ … │ │ master │ Mar 01, 2021 │ 0.60405 │ 0.9608 │ 0.2 │ 20170428 │ … │ │ └── *4033075 │ 10:30 AM │ - │ 0.2 │ 20170428 │ 3000 │ 2 │ └──────────────┴──────────────┴──────────┴─────────┴───────────────┴──────────────┴───┘ ``` ### Expected ``` ┏━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━┓ ┃ Experiment ┃ Created ┃ avg_prec ┃ roc_auc ┃ prepare.split ┃ prepare.seed ┃ … ┃ ┡━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━┩ │ workspace │ - │ 0.60405 │ 0.9608 │ 0.2 │ 20170428 │ … │ │ master │ Mar 01, 2021 │ 0.60405 │ 0.9608 │ 0.2 │ 20170428 │ … │ │ └── *4033075 │ 10:30 AM │ - │ - | 0.2 │ 20170428 │ … │ └──────────────┴──────────────┴──────────┴─────────┴───────────────┴──────────────┴───┘ ``` ### Environment information **Output of `dvc doctor`:** ```console $ dvc doctor DVC version: 2.0.5+4ed55d --------------------------------- Platform: Python 3.8.5 on macOS-10.16-x86_64-i386-64bit Supports: gdrive, hdfs, webhdfs, http, https, s3, ssh, oss, webdav, webdavs Cache types: reflink, hardlink, symlink Cache directory: apfs on /dev/disk3s1s1 Caches: local Remotes: https Workspace directory: apfs on /dev/disk3s1s1 Repo: dvc, git ``` **Additional Information (if any):**
[ { "content": "import argparse\nimport logging\nfrom collections import Counter, OrderedDict, defaultdict\nfrom collections.abc import Mapping\nfrom datetime import date, datetime\nfrom itertools import groupby\nfrom typing import Dict, Iterable, Optional\n\nimport dvc.prompt as prompt\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link, fix_subparsers\nfrom dvc.command.metrics import DEFAULT_PRECISION\nfrom dvc.command.repro import CmdRepro\nfrom dvc.command.repro import add_arguments as add_repro_arguments\nfrom dvc.exceptions import DvcException, InvalidArgumentError\nfrom dvc.utils.flatten import flatten\n\nlogger = logging.getLogger(__name__)\n\n\nSHOW_MAX_WIDTH = 1024\n\n\ndef _filter_name(names, label, filter_strs):\n ret = defaultdict(dict)\n path_filters = defaultdict(list)\n\n for filter_s in filter_strs:\n path, _, name = filter_s.rpartition(\":\")\n path_filters[path].append(tuple(name.split(\".\")))\n\n for path, filters in path_filters.items():\n if path:\n match_paths = [path]\n else:\n match_paths = names.keys()\n for length, groups in groupby(filters, len):\n for group in groups:\n for match_path in match_paths:\n possible_names = [\n tuple(name.split(\".\")) for name in names[match_path]\n ]\n matches = [\n name\n for name in possible_names\n if name[:length] == group\n ]\n if not matches:\n name = \".\".join(group)\n raise InvalidArgumentError(\n f\"'{name}' does not match any known {label}\"\n )\n ret[match_path].update(\n {\".\".join(match): None for match in matches}\n )\n\n return ret\n\n\ndef _filter_names(\n names: Dict[str, Dict[str, None]],\n label: str,\n include: Optional[Iterable],\n exclude: Optional[Iterable],\n):\n if include and exclude:\n intersection = set(include) & set(exclude)\n if intersection:\n values = \", \".join(intersection)\n raise InvalidArgumentError(\n f\"'{values}' specified in both --include-{label} and\"\n f\" --exclude-{label}\"\n )\n\n if include:\n ret = _filter_name(names, label, include)\n else:\n ret = names\n\n if exclude:\n to_remove = _filter_name(names, label, exclude)\n for path in to_remove:\n if path in ret:\n for key in to_remove[path]:\n if key in ret[path]:\n del ret[path][key]\n\n return ret\n\n\ndef _update_names(names, items):\n for name, item in items:\n if isinstance(item, dict):\n item = flatten(item)\n names[name].update({key: None for key in item})\n\n\ndef _collect_names(all_experiments, **kwargs):\n metric_names = defaultdict(dict)\n param_names = defaultdict(dict)\n\n for _, experiments in all_experiments.items():\n for exp in experiments.values():\n _update_names(metric_names, exp.get(\"metrics\", {}).items())\n _update_names(param_names, exp.get(\"params\", {}).items())\n\n metric_names = _filter_names(\n metric_names,\n \"metrics\",\n kwargs.get(\"include_metrics\"),\n kwargs.get(\"exclude_metrics\"),\n )\n param_names = _filter_names(\n (param_names),\n \"params\",\n kwargs.get(\"include_params\"),\n kwargs.get(\"exclude_params\"),\n )\n\n return metric_names, param_names\n\n\ndef _collect_rows(\n base_rev,\n experiments,\n metric_names,\n param_names,\n precision=DEFAULT_PRECISION,\n no_timestamp=False,\n sort_by=None,\n sort_order=None,\n):\n from dvc.scm.git import Git\n\n if sort_by:\n sort_path, sort_name, sort_type = _sort_column(\n sort_by, metric_names, param_names\n )\n reverse = sort_order == \"desc\"\n experiments = _sort_exp(\n experiments, sort_path, sort_name, sort_type, reverse\n )\n\n new_checkpoint = True\n for i, (rev, exp) in 
enumerate(experiments.items()):\n row = []\n style = None\n queued = \"*\" if exp.get(\"queued\", False) else \"\"\n\n tip = exp.get(\"checkpoint_tip\")\n parent = \"\"\n if rev == \"baseline\":\n if Git.is_sha(base_rev):\n name_rev = base_rev[:7]\n else:\n name_rev = base_rev\n name = exp.get(\"name\", name_rev)\n row.append(f\"{name}\")\n style = \"bold\"\n else:\n if tip:\n parent_rev = exp.get(\"checkpoint_parent\", \"\")\n parent_exp = experiments.get(parent_rev, {})\n parent_tip = parent_exp.get(\"checkpoint_tip\")\n if tip == parent_tip:\n if new_checkpoint:\n tree = \"│ ╓\"\n else:\n tree = \"│ ╟\"\n new_checkpoint = False\n else:\n if parent_rev == base_rev:\n tree = \"├─╨\"\n else:\n tree = \"│ ╟\"\n parent = f\" ({parent_rev[:7]})\"\n new_checkpoint = True\n else:\n if i < len(experiments) - 1:\n tree = \"├──\"\n else:\n tree = \"└──\"\n new_checkpoint = True\n name = exp.get(\"name\", rev[:7])\n row.append(f\"{tree} {queued}{name}{parent}\")\n\n if not no_timestamp:\n row.append(_format_time(exp.get(\"timestamp\")))\n\n _extend_row(\n row, metric_names, exp.get(\"metrics\", {}).items(), precision\n )\n _extend_row(row, param_names, exp.get(\"params\", {}).items(), precision)\n\n yield row, style\n\n\ndef _sort_column(sort_by, metric_names, param_names):\n path, _, sort_name = sort_by.rpartition(\":\")\n matches = set()\n\n if path:\n if path in metric_names and sort_name in metric_names[path]:\n matches.add((path, sort_name, \"metrics\"))\n if path in param_names and sort_name in param_names[path]:\n matches.add((path, sort_name, \"params\"))\n else:\n for path in metric_names:\n if sort_name in metric_names[path]:\n matches.add((path, sort_name, \"metrics\"))\n for path in param_names:\n if sort_name in param_names[path]:\n matches.add((path, sort_name, \"params\"))\n\n if len(matches) == 1:\n return matches.pop()\n if len(matches) > 1:\n raise InvalidArgumentError(\n \"Ambiguous sort column '{}' matched '{}'\".format(\n sort_by,\n \", \".join([f\"{path}:{name}\" for path, name, _ in matches]),\n )\n )\n raise InvalidArgumentError(f\"Unknown sort column '{sort_by}'\")\n\n\ndef _sort_exp(experiments, sort_path, sort_name, typ, reverse):\n def _sort(item):\n rev, exp = item\n tip = exp.get(\"checkpoint_tip\")\n if tip and tip != rev:\n # Sort checkpoint experiments by tip commit\n return _sort((tip, experiments[tip]))\n data = exp.get(typ, {}).get(sort_path, {})\n val = flatten(data).get(sort_name)\n return (val is None, val)\n\n ret = OrderedDict()\n if \"baseline\" in experiments:\n ret[\"baseline\"] = experiments.pop(\"baseline\")\n\n ret.update(sorted(experiments.items(), key=_sort, reverse=reverse))\n return ret\n\n\ndef _format_time(timestamp):\n if timestamp is None:\n return \"-\"\n if timestamp.date() == date.today():\n fmt = \"%I:%M %p\"\n else:\n fmt = \"%b %d, %Y\"\n return timestamp.strftime(fmt)\n\n\ndef _format_field(val, precision=DEFAULT_PRECISION):\n if isinstance(val, float):\n fmt = f\"{{:.{precision}g}}\"\n return fmt.format(val)\n if isinstance(val, Mapping):\n return {k: _format_field(v) for k, v in val.items()}\n if isinstance(val, list):\n return [_format_field(x) for x in val]\n return str(val)\n\n\ndef _extend_row(row, names, items, precision):\n from rich.text import Text\n\n if not items:\n row.extend([\"-\"] * len(names))\n return\n\n for fname, item in items:\n if isinstance(item, dict):\n item = flatten(item)\n else:\n item = {fname: item}\n for name in names[fname]:\n if name in item:\n value = item[name]\n if value is None:\n text = \"-\"\n 
else:\n # wrap field data in rich.Text, otherwise rich may\n # interpret unescaped braces from list/dict types as rich\n # markup tags\n text = Text(str(_format_field(value, precision)))\n row.append(text)\n else:\n row.append(\"-\")\n\n\ndef _parse_filter_list(param_list):\n ret = []\n for param_str in param_list:\n path, _, param_str = param_str.rpartition(\":\")\n if path:\n ret.extend(f\"{path}:{param}\" for param in param_str.split(\",\"))\n else:\n ret.extend(param_str.split(\",\"))\n return ret\n\n\ndef _experiments_table(all_experiments, **kwargs):\n from dvc.utils.table import Table\n\n include_metrics = _parse_filter_list(kwargs.pop(\"include_metrics\", []))\n exclude_metrics = _parse_filter_list(kwargs.pop(\"exclude_metrics\", []))\n include_params = _parse_filter_list(kwargs.pop(\"include_params\", []))\n exclude_params = _parse_filter_list(kwargs.pop(\"exclude_params\", []))\n\n metric_names, param_names = _collect_names(\n all_experiments,\n include_metrics=include_metrics,\n exclude_metrics=exclude_metrics,\n include_params=include_params,\n exclude_params=exclude_params,\n )\n\n table = Table()\n table.add_column(\n \"Experiment\", no_wrap=True, header_style=\"black on grey93\"\n )\n if not kwargs.get(\"no_timestamp\", False):\n table.add_column(\"Created\", header_style=\"black on grey93\")\n _add_data_columns(\n table,\n metric_names,\n justify=\"right\",\n no_wrap=True,\n header_style=\"black on cornsilk1\",\n )\n _add_data_columns(\n table, param_names, justify=\"left\", header_style=\"black on light_cyan1\"\n )\n\n for base_rev, experiments in all_experiments.items():\n for row, _, in _collect_rows(\n base_rev, experiments, metric_names, param_names, **kwargs,\n ):\n table.add_row(*row)\n\n return table\n\n\ndef _add_data_columns(table, names, **kwargs):\n count = Counter(\n name for path in names for name in names[path] for path in names\n )\n first = True\n for path in names:\n for name in names[path]:\n col_name = name if count[name] == 1 else f\"{path}:{name}\"\n kwargs[\"collapse\"] = False if first else True\n table.add_column(col_name, **kwargs)\n first = False\n\n\ndef _format_json(item):\n if isinstance(item, (date, datetime)):\n return item.isoformat()\n raise TypeError\n\n\nclass CmdExperimentsShow(CmdBase):\n def run(self):\n from rich.console import Console\n\n try:\n all_experiments = self.repo.experiments.show(\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n all_commits=self.args.all_commits,\n sha_only=self.args.sha,\n num=self.args.num,\n )\n\n if self.args.show_json:\n import json\n\n logger.info(json.dumps(all_experiments, default=_format_json))\n return 0\n\n if self.args.precision is None:\n precision = DEFAULT_PRECISION\n else:\n precision = self.args.precision\n\n table = _experiments_table(\n all_experiments,\n include_metrics=self.args.include_metrics,\n exclude_metrics=self.args.exclude_metrics,\n include_params=self.args.include_params,\n exclude_params=self.args.exclude_params,\n no_timestamp=self.args.no_timestamp,\n sort_by=self.args.sort_by,\n sort_order=self.args.sort_order,\n precision=precision,\n )\n\n console = Console()\n if self.args.no_pager:\n console.print(table)\n else:\n from dvc.utils.pager import DvcPager\n\n # NOTE: rich does not have native support for unlimited width\n # via pager. 
we override rich table compression by setting\n # console width to the full width of the table\n console_options = console.options\n console_options.max_width = SHOW_MAX_WIDTH\n measurement = table.__rich_measure__(console, console_options)\n console._width = ( # pylint: disable=protected-access\n measurement.maximum\n )\n with console.pager(pager=DvcPager(), styles=True):\n console.print(table)\n\n except DvcException:\n logger.exception(\"failed to show experiments\")\n return 1\n\n return 0\n\n\nclass CmdExperimentsApply(CmdBase):\n def run(self):\n\n self.repo.experiments.apply(\n self.args.experiment, force=self.args.force\n )\n\n return 0\n\n\ndef _show_diff(\n diff,\n title=\"\",\n markdown=False,\n no_path=False,\n old=False,\n precision=DEFAULT_PRECISION,\n):\n from dvc.utils.diff import table\n\n rows = []\n for fname, diff_ in diff.items():\n sorted_diff = OrderedDict(sorted(diff_.items()))\n for item, change in sorted_diff.items():\n row = [] if no_path else [fname]\n row.append(item)\n if old:\n row.append(_format_field(change.get(\"old\"), precision))\n row.append(_format_field(change[\"new\"], precision))\n row.append(\n _format_field(\n change.get(\"diff\", \"diff not supported\"), precision\n )\n )\n rows.append(row)\n\n header = [] if no_path else [\"Path\"]\n header.append(title)\n if old:\n header.extend([\"Old\", \"New\"])\n else:\n header.append(\"Value\")\n header.append(\"Change\")\n\n return table(header, rows, markdown)\n\n\nclass CmdExperimentsDiff(CmdBase):\n def run(self):\n\n try:\n diff = self.repo.experiments.diff(\n a_rev=self.args.a_rev,\n b_rev=self.args.b_rev,\n all=self.args.all,\n )\n\n if self.args.show_json:\n import json\n\n logger.info(json.dumps(diff))\n else:\n if self.args.precision is None:\n precision = DEFAULT_PRECISION\n else:\n precision = self.args.precision\n\n diffs = [(\"metrics\", \"Metric\"), (\"params\", \"Param\")]\n for key, title in diffs:\n table = _show_diff(\n diff[key],\n title=title,\n markdown=self.args.show_md,\n no_path=self.args.no_path,\n old=self.args.old,\n precision=precision,\n )\n if table:\n logger.info(table)\n logger.info(\"\")\n\n except DvcException:\n logger.exception(\"failed to show experiments diff\")\n return 1\n\n return 0\n\n\nclass CmdExperimentsRun(CmdRepro):\n def run(self):\n from dvc.command.metrics import _show_metrics\n\n if self.args.reset and self.args.checkpoint_resume:\n raise InvalidArgumentError(\n \"--reset and --rev are mutually exclusive.\"\n )\n\n if self.args.reset:\n logger.info(\"Any existing checkpoints will be reset and re-run.\")\n\n results = self.repo.experiments.run(\n name=self.args.name,\n queue=self.args.queue,\n run_all=self.args.run_all,\n jobs=self.args.jobs,\n params=self.args.set_param,\n checkpoint_resume=self.args.checkpoint_resume,\n reset=self.args.reset,\n tmp_dir=self.args.tmp_dir,\n **self._repro_kwargs,\n )\n\n if self.args.metrics and results:\n metrics = self.repo.metrics.show(revs=list(results))\n metrics.pop(\"workspace\", None)\n logger.info(_show_metrics(metrics))\n\n return 0\n\n\ndef _raise_error_if_all_disabled(**kwargs):\n if not any(kwargs.values()):\n raise InvalidArgumentError(\n \"Either of `-w|--workspace`, `-a|--all-branches`, `-T|--all-tags` \"\n \"or `--all-commits` needs to be set.\"\n )\n\n\nclass CmdExperimentsGC(CmdRepro):\n def run(self):\n _raise_error_if_all_disabled(\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n all_commits=self.args.all_commits,\n workspace=self.args.workspace,\n )\n\n msg = \"This will remove 
all experiments except those derived from \"\n\n msg += \"the workspace\"\n if self.args.all_commits:\n msg += \" and all git commits\"\n elif self.args.all_branches and self.args.all_tags:\n msg += \" and all git branches and tags\"\n elif self.args.all_branches:\n msg += \" and all git branches\"\n elif self.args.all_tags:\n msg += \" and all git tags\"\n msg += \" of the current repo.\"\n if self.args.queued:\n msg += \" Run queued experiments will be preserved.\"\n if self.args.queued:\n msg += \" Run queued experiments will be removed.\"\n\n logger.warning(msg)\n\n msg = \"Are you sure you want to proceed?\"\n if not self.args.force and not prompt.confirm(msg):\n return 1\n\n removed = self.repo.experiments.gc(\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n all_commits=self.args.all_commits,\n workspace=self.args.workspace,\n queued=self.args.queued,\n )\n\n if removed:\n logger.info(\n f\"Removed {removed} experiments. To remove unused cache files \"\n \"use 'dvc gc'.\"\n )\n else:\n logger.info(\"No experiments to remove.\")\n return 0\n\n\nclass CmdExperimentsBranch(CmdBase):\n def run(self):\n\n self.repo.experiments.branch(self.args.experiment, self.args.branch)\n\n return 0\n\n\nclass CmdExperimentsList(CmdBase):\n def run(self):\n names_only = self.args.names_only\n exps = self.repo.experiments.ls(\n rev=self.args.rev,\n git_remote=self.args.git_remote,\n all_=self.args.all,\n )\n for baseline in exps:\n tag = self.repo.scm.describe(baseline)\n if not tag:\n branch = self.repo.scm.describe(baseline, base=\"refs/heads\")\n if branch:\n tag = branch.split(\"/\")[-1]\n name = tag if tag else baseline[:7]\n if not names_only:\n print(f\"{name}:\")\n for exp_name in exps[baseline]:\n indent = \"\" if names_only else \"\\t\"\n print(f\"{indent}{exp_name}\")\n\n return 0\n\n\nclass CmdExperimentsPush(CmdBase):\n def run(self):\n\n self.repo.experiments.push(\n self.args.git_remote,\n self.args.experiment,\n force=self.args.force,\n push_cache=self.args.push_cache,\n dvc_remote=self.args.dvc_remote,\n jobs=self.args.jobs,\n run_cache=self.args.run_cache,\n )\n\n logger.info(\n \"Pushed experiment '%s' to Git remote '%s'.\",\n self.args.experiment,\n self.args.git_remote,\n )\n if not self.args.push_cache:\n logger.info(\n \"To push cached outputs for this experiment to DVC remote \"\n \"storage, re-run this command without '--no-cache'.\"\n )\n\n return 0\n\n\nclass CmdExperimentsPull(CmdBase):\n def run(self):\n\n self.repo.experiments.pull(\n self.args.git_remote,\n self.args.experiment,\n force=self.args.force,\n pull_cache=self.args.pull_cache,\n dvc_remote=self.args.dvc_remote,\n jobs=self.args.jobs,\n run_cache=self.args.run_cache,\n )\n\n logger.info(\n \"Pulled experiment '%s' from Git remote '%s'. 
\",\n self.args.experiment,\n self.args.git_remote,\n )\n if not self.args.pull_cache:\n logger.info(\n \"To pull cached outputs for this experiment from DVC remote \"\n \"storage, re-run this command without '--no-cache'.\"\n )\n\n return 0\n\n\nclass CmdExperimentsRemove(CmdBase):\n def run(self):\n\n self.repo.experiments.remove(\n exp_names=self.args.experiment, queue=self.args.queue,\n )\n\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n EXPERIMENTS_HELP = \"Commands to run and compare experiments.\"\n\n experiments_parser = subparsers.add_parser(\n \"experiments\",\n parents=[parent_parser],\n aliases=[\"exp\"],\n description=append_doc_link(EXPERIMENTS_HELP, \"exp\"),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help=EXPERIMENTS_HELP,\n )\n\n experiments_subparsers = experiments_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc experiments CMD --help` to display \"\n \"command-specific help.\",\n )\n\n fix_subparsers(experiments_subparsers)\n\n EXPERIMENTS_SHOW_HELP = \"Print experiments.\"\n experiments_show_parser = experiments_subparsers.add_parser(\n \"show\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_SHOW_HELP, \"exp/show\"),\n help=EXPERIMENTS_SHOW_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_show_parser.add_argument(\n \"-a\",\n \"--all-branches\",\n action=\"store_true\",\n default=False,\n help=\"Show experiments derived from the tip of all Git branches.\",\n )\n experiments_show_parser.add_argument(\n \"-T\",\n \"--all-tags\",\n action=\"store_true\",\n default=False,\n help=\"Show experiments derived from all Git tags.\",\n )\n experiments_show_parser.add_argument(\n \"-A\",\n \"--all-commits\",\n action=\"store_true\",\n default=False,\n help=\"Show experiments derived from all Git commits.\",\n )\n experiments_show_parser.add_argument(\n \"-n\",\n \"--num\",\n type=int,\n default=1,\n dest=\"num\",\n metavar=\"<num>\",\n help=\"Show the last `num` commits from HEAD.\",\n )\n experiments_show_parser.add_argument(\n \"--no-pager\",\n action=\"store_true\",\n default=False,\n help=\"Do not pipe output into a pager.\",\n )\n experiments_show_parser.add_argument(\n \"--include-metrics\",\n action=\"append\",\n default=[],\n help=\"Include the specified metrics in output table.\",\n metavar=\"<metrics_list>\",\n )\n experiments_show_parser.add_argument(\n \"--exclude-metrics\",\n action=\"append\",\n default=[],\n help=\"Exclude the specified metrics from output table.\",\n metavar=\"<metrics_list>\",\n )\n experiments_show_parser.add_argument(\n \"--include-params\",\n action=\"append\",\n default=[],\n help=\"Include the specified params in output table.\",\n metavar=\"<params_list>\",\n )\n experiments_show_parser.add_argument(\n \"--exclude-params\",\n action=\"append\",\n default=[],\n help=\"Exclude the specified params from output table.\",\n metavar=\"<params_list>\",\n )\n experiments_show_parser.add_argument(\n \"--sort-by\",\n help=\"Sort related experiments by the specified metric or param.\",\n metavar=\"<metric/param>\",\n )\n experiments_show_parser.add_argument(\n \"--sort-order\",\n help=\"Sort order to use with --sort-by.\",\n choices=(\"asc\", \"desc\"),\n default=\"asc\",\n )\n experiments_show_parser.add_argument(\n \"--no-timestamp\",\n action=\"store_true\",\n default=False,\n help=\"Do not show experiment timestamps.\",\n )\n experiments_show_parser.add_argument(\n \"--sha\",\n action=\"store_true\",\n default=False,\n help=\"Always show git commit SHAs 
instead of branch/tag names.\",\n )\n experiments_show_parser.add_argument(\n \"--show-json\",\n action=\"store_true\",\n default=False,\n help=\"Print output in JSON format instead of a human-readable table.\",\n )\n experiments_show_parser.add_argument(\n \"--precision\",\n type=int,\n help=(\n \"Round metrics/params to `n` digits precision after the decimal \"\n f\"point. Rounds to {DEFAULT_PRECISION} digits by default.\"\n ),\n metavar=\"<n>\",\n )\n experiments_show_parser.set_defaults(func=CmdExperimentsShow)\n\n EXPERIMENTS_APPLY_HELP = (\n \"Apply the changes from an experiment to your workspace.\"\n )\n experiments_apply_parser = experiments_subparsers.add_parser(\n \"apply\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_APPLY_HELP, \"exp/apply\"),\n help=EXPERIMENTS_APPLY_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_apply_parser.add_argument(\n \"--no-force\",\n action=\"store_false\",\n dest=\"force\",\n help=\"Fail if this command would overwrite conflicting changes.\",\n )\n experiments_apply_parser.add_argument(\n \"experiment\", help=\"Experiment to be applied.\",\n ).complete = completion.EXPERIMENT\n experiments_apply_parser.set_defaults(func=CmdExperimentsApply)\n\n EXPERIMENTS_DIFF_HELP = (\n \"Show changes between experiments in the DVC repository.\"\n )\n experiments_diff_parser = experiments_subparsers.add_parser(\n \"diff\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_DIFF_HELP, \"exp/diff\"),\n help=EXPERIMENTS_DIFF_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_diff_parser.add_argument(\n \"a_rev\", nargs=\"?\", help=\"Old experiment to compare (defaults to HEAD)\"\n ).complete = completion.EXPERIMENT\n experiments_diff_parser.add_argument(\n \"b_rev\",\n nargs=\"?\",\n help=\"New experiment to compare (defaults to the current workspace)\",\n ).complete = completion.EXPERIMENT\n experiments_diff_parser.add_argument(\n \"--all\",\n action=\"store_true\",\n default=False,\n help=\"Show unchanged metrics/params as well.\",\n )\n experiments_diff_parser.add_argument(\n \"--show-json\",\n action=\"store_true\",\n default=False,\n help=\"Show output in JSON format.\",\n )\n experiments_diff_parser.add_argument(\n \"--show-md\",\n action=\"store_true\",\n default=False,\n help=\"Show tabulated output in the Markdown format (GFM).\",\n )\n experiments_diff_parser.add_argument(\n \"--old\",\n action=\"store_true\",\n default=False,\n help=\"Show old metric/param value.\",\n )\n experiments_diff_parser.add_argument(\n \"--no-path\",\n action=\"store_true\",\n default=False,\n help=\"Don't show metric/param path.\",\n )\n experiments_diff_parser.add_argument(\n \"--precision\",\n type=int,\n help=(\n \"Round metrics/params to `n` digits precision after the decimal \"\n f\"point. Rounds to {DEFAULT_PRECISION} digits by default.\"\n ),\n metavar=\"<n>\",\n )\n experiments_diff_parser.set_defaults(func=CmdExperimentsDiff)\n\n EXPERIMENTS_RUN_HELP = (\n \"Reproduce complete or partial experiment pipelines.\"\n )\n experiments_run_parser = experiments_subparsers.add_parser(\n \"run\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_RUN_HELP, \"exp/run\"),\n help=EXPERIMENTS_RUN_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n _add_run_common(experiments_run_parser)\n experiments_run_parser.add_argument(\n \"-r\",\n \"--rev\",\n type=str,\n dest=\"checkpoint_resume\",\n help=(\n \"Continue the specified checkpoint experiment. 
\"\n \"(Only required for explicitly resuming checkpoints in queued \"\n \"or temp dir runs.)\"\n ),\n metavar=\"<experiment_rev>\",\n ).complete = completion.EXPERIMENT\n experiments_run_parser.add_argument(\n \"--reset\",\n action=\"store_true\",\n help=\"Reset existing checkpoints and restart the experiment.\",\n )\n experiments_run_parser.set_defaults(func=CmdExperimentsRun)\n\n EXPERIMENTS_GC_HELP = \"Garbage collect unneeded experiments.\"\n EXPERIMENTS_GC_DESCRIPTION = (\n \"Removes all experiments which are not derived from the specified\"\n \"Git revisions.\"\n )\n experiments_gc_parser = experiments_subparsers.add_parser(\n \"gc\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_GC_DESCRIPTION, \"exp/gc\"),\n help=EXPERIMENTS_GC_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_gc_parser.add_argument(\n \"-w\",\n \"--workspace\",\n action=\"store_true\",\n default=False,\n help=\"Keep experiments derived from the current workspace.\",\n )\n experiments_gc_parser.add_argument(\n \"-a\",\n \"--all-branches\",\n action=\"store_true\",\n default=False,\n help=\"Keep experiments derived from the tips of all Git branches.\",\n )\n experiments_gc_parser.add_argument(\n \"-T\",\n \"--all-tags\",\n action=\"store_true\",\n default=False,\n help=\"Keep experiments derived from all Git tags.\",\n )\n experiments_gc_parser.add_argument(\n \"--all-commits\",\n action=\"store_true\",\n default=False,\n help=\"Keep experiments derived from all Git commits.\",\n )\n experiments_gc_parser.add_argument(\n \"--queued\",\n action=\"store_true\",\n default=False,\n help=(\n \"Keep queued experiments (experiments run queue will be cleared \"\n \"by default).\"\n ),\n )\n experiments_gc_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Force garbage collection - automatically agree to all prompts.\",\n )\n experiments_gc_parser.set_defaults(func=CmdExperimentsGC)\n\n EXPERIMENTS_BRANCH_HELP = \"Promote an experiment to a Git branch.\"\n experiments_branch_parser = experiments_subparsers.add_parser(\n \"branch\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_BRANCH_HELP, \"exp/branch\"),\n help=EXPERIMENTS_BRANCH_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_branch_parser.add_argument(\n \"experiment\", help=\"Experiment to be promoted.\",\n )\n experiments_branch_parser.add_argument(\n \"branch\", help=\"Git branch name to use.\",\n )\n experiments_branch_parser.set_defaults(func=CmdExperimentsBranch)\n\n EXPERIMENTS_LIST_HELP = \"List local and remote experiments.\"\n experiments_list_parser = experiments_subparsers.add_parser(\n \"list\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_LIST_HELP, \"exp/list\"),\n help=EXPERIMENTS_LIST_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_list_parser.add_argument(\n \"--rev\",\n type=str,\n default=None,\n help=(\n \"List experiments derived from the specified revision. 
\"\n \"Defaults to HEAD if neither `--rev` nor `--all` are specified.\"\n ),\n metavar=\"<rev>\",\n )\n experiments_list_parser.add_argument(\n \"--all\", action=\"store_true\", help=\"List all experiments.\",\n )\n experiments_list_parser.add_argument(\n \"--names-only\",\n action=\"store_true\",\n help=\"Only output experiment names (without parent commits).\",\n )\n experiments_list_parser.add_argument(\n \"git_remote\",\n nargs=\"?\",\n default=None,\n help=(\n \"Optional Git remote name or Git URL. If provided, experiments \"\n \"from the specified Git repository will be listed instead of \"\n \"local experiments.\"\n ),\n metavar=\"[<git_remote>]\",\n )\n experiments_list_parser.set_defaults(func=CmdExperimentsList)\n\n EXPERIMENTS_PUSH_HELP = \"Push a local experiment to a Git remote.\"\n experiments_push_parser = experiments_subparsers.add_parser(\n \"push\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_PUSH_HELP, \"exp/push\"),\n help=EXPERIMENTS_PUSH_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_push_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n help=\"Replace experiment in the Git remote if it already exists.\",\n )\n experiments_push_parser.add_argument(\n \"--no-cache\",\n action=\"store_false\",\n dest=\"push_cache\",\n help=(\n \"Do not push cached outputs for this experiment to DVC remote \"\n \"storage.\"\n ),\n )\n experiments_push_parser.add_argument(\n \"-r\",\n \"--remote\",\n dest=\"dvc_remote\",\n metavar=\"<name>\",\n help=\"Name of the DVC remote to use when pushing cached outputs.\",\n )\n experiments_push_parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n metavar=\"<number>\",\n help=(\n \"Number of jobs to run simultaneously when pushing to DVC remote \"\n \"storage.\"\n ),\n )\n experiments_push_parser.add_argument(\n \"--run-cache\",\n action=\"store_true\",\n default=False,\n help=\"Push run history for all stages.\",\n )\n experiments_push_parser.add_argument(\n \"git_remote\",\n help=\"Git remote name or Git URL.\",\n metavar=\"<git_remote>\",\n )\n experiments_push_parser.add_argument(\n \"experiment\", help=\"Experiment to push.\", metavar=\"<experiment>\",\n ).complete = completion.EXPERIMENT\n experiments_push_parser.set_defaults(func=CmdExperimentsPush)\n\n EXPERIMENTS_PULL_HELP = \"Pull an experiment from a Git remote.\"\n experiments_pull_parser = experiments_subparsers.add_parser(\n \"pull\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_PULL_HELP, \"exp/pull\"),\n help=EXPERIMENTS_PULL_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_pull_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n help=\"Replace local experiment already exists.\",\n )\n experiments_pull_parser.add_argument(\n \"--no-cache\",\n action=\"store_false\",\n dest=\"pull_cache\",\n help=(\n \"Do not pull cached outputs for this experiment from DVC remote \"\n \"storage.\"\n ),\n )\n experiments_pull_parser.add_argument(\n \"-r\",\n \"--remote\",\n dest=\"dvc_remote\",\n metavar=\"<name>\",\n help=\"Name of the DVC remote to use when pulling cached outputs.\",\n )\n experiments_pull_parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n metavar=\"<number>\",\n help=(\n \"Number of jobs to run simultaneously when pulling from DVC \"\n \"remote storage.\"\n ),\n )\n experiments_pull_parser.add_argument(\n \"--run-cache\",\n action=\"store_true\",\n default=False,\n help=\"Pull run history for all stages.\",\n )\n 
experiments_pull_parser.add_argument(\n \"git_remote\",\n help=\"Git remote name or Git URL.\",\n metavar=\"<git_remote>\",\n )\n experiments_pull_parser.add_argument(\n \"experiment\", help=\"Experiment to pull.\", metavar=\"<experiment>\",\n )\n experiments_pull_parser.set_defaults(func=CmdExperimentsPull)\n\n EXPERIMENTS_REMOVE_HELP = \"Remove local experiments.\"\n experiments_remove_parser = experiments_subparsers.add_parser(\n \"remove\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_REMOVE_HELP, \"exp/remove\"),\n help=EXPERIMENTS_REMOVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_remove_parser.add_argument(\n \"--queue\", action=\"store_true\", help=\"Remove all queued experiments.\",\n )\n experiments_remove_parser.add_argument(\n \"experiment\",\n nargs=\"*\",\n help=\"Experiments to remove.\",\n metavar=\"<experiment>\",\n )\n experiments_remove_parser.set_defaults(func=CmdExperimentsRemove)\n\n\ndef _add_run_common(parser):\n \"\"\"Add common args for 'exp run' and 'exp resume'.\"\"\"\n # inherit arguments from `dvc repro`\n add_repro_arguments(parser)\n parser.add_argument(\n \"-n\",\n \"--name\",\n default=None,\n help=(\n \"Human-readable experiment name. If not specified, a name will \"\n \"be auto-generated.\"\n ),\n metavar=\"<name>\",\n )\n parser.add_argument(\n \"-S\",\n \"--set-param\",\n action=\"append\",\n default=[],\n help=\"Use the specified param value when reproducing pipelines.\",\n metavar=\"[<filename>:]<param_name>=<param_value>\",\n )\n parser.add_argument(\n \"--queue\",\n action=\"store_true\",\n default=False,\n help=\"Stage this experiment in the run queue for future execution.\",\n )\n parser.add_argument(\n \"--run-all\",\n action=\"store_true\",\n default=False,\n help=\"Execute all experiments in the run queue. Implies --temp.\",\n )\n parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n help=\"Run the specified number of experiments at a time in parallel.\",\n metavar=\"<number>\",\n )\n parser.add_argument(\n \"--temp\",\n action=\"store_true\",\n dest=\"tmp_dir\",\n help=(\n \"Run this experiment in a separate temporary directory instead of \"\n \"your workspace.\"\n ),\n )\n", "path": "dvc/command/experiments.py" } ]
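The `CmdExperimentsShow` implementation in the file above works around rich's table compression when paging: it measures the table at an effectively unlimited width and then forces the console to that width before printing (the actual command wraps the print in `console.pager(...)`). A condensed, standalone sketch of the same trick follows; the table contents are invented, and `console._width` is a private rich attribute, used here only because the file above relies on it.

```python
# Condensed sketch of the pager-width workaround used in CmdExperimentsShow:
# measure the table at a very large width, then force the console to that
# width so rows are not wrapped/compressed before being piped to a pager.
from rich.console import Console
from rich.table import Table

SHOW_MAX_WIDTH = 1024  # same constant name as in the file above

table = Table()
table.add_column("Experiment", no_wrap=True)
table.add_column("Created")
table.add_row("exp-e4f2a", "10:15 AM")  # invented sample row

console = Console()
options = console.options
options.max_width = SHOW_MAX_WIDTH
measurement = table.__rich_measure__(console, options)
console._width = measurement.maximum  # private attribute, mirrored from the file
console.print(table)
```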
[ { "content": "import argparse\nimport logging\nfrom collections import Counter, OrderedDict, defaultdict\nfrom collections.abc import Mapping\nfrom datetime import date, datetime\nfrom itertools import groupby\nfrom typing import Dict, Iterable, Optional\n\nimport dvc.prompt as prompt\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link, fix_subparsers\nfrom dvc.command.metrics import DEFAULT_PRECISION\nfrom dvc.command.repro import CmdRepro\nfrom dvc.command.repro import add_arguments as add_repro_arguments\nfrom dvc.exceptions import DvcException, InvalidArgumentError\nfrom dvc.utils.flatten import flatten\n\nlogger = logging.getLogger(__name__)\n\n\nSHOW_MAX_WIDTH = 1024\n\n\ndef _filter_name(names, label, filter_strs):\n ret = defaultdict(dict)\n path_filters = defaultdict(list)\n\n for filter_s in filter_strs:\n path, _, name = filter_s.rpartition(\":\")\n path_filters[path].append(tuple(name.split(\".\")))\n\n for path, filters in path_filters.items():\n if path:\n match_paths = [path]\n else:\n match_paths = names.keys()\n for length, groups in groupby(filters, len):\n for group in groups:\n for match_path in match_paths:\n possible_names = [\n tuple(name.split(\".\")) for name in names[match_path]\n ]\n matches = [\n name\n for name in possible_names\n if name[:length] == group\n ]\n if not matches:\n name = \".\".join(group)\n raise InvalidArgumentError(\n f\"'{name}' does not match any known {label}\"\n )\n ret[match_path].update(\n {\".\".join(match): None for match in matches}\n )\n\n return ret\n\n\ndef _filter_names(\n names: Dict[str, Dict[str, None]],\n label: str,\n include: Optional[Iterable],\n exclude: Optional[Iterable],\n):\n if include and exclude:\n intersection = set(include) & set(exclude)\n if intersection:\n values = \", \".join(intersection)\n raise InvalidArgumentError(\n f\"'{values}' specified in both --include-{label} and\"\n f\" --exclude-{label}\"\n )\n\n if include:\n ret = _filter_name(names, label, include)\n else:\n ret = names\n\n if exclude:\n to_remove = _filter_name(names, label, exclude)\n for path in to_remove:\n if path in ret:\n for key in to_remove[path]:\n if key in ret[path]:\n del ret[path][key]\n\n return ret\n\n\ndef _update_names(names, items):\n for name, item in items:\n if isinstance(item, dict):\n item = flatten(item)\n names[name].update({key: None for key in item})\n\n\ndef _collect_names(all_experiments, **kwargs):\n metric_names = defaultdict(dict)\n param_names = defaultdict(dict)\n\n for _, experiments in all_experiments.items():\n for exp in experiments.values():\n _update_names(metric_names, exp.get(\"metrics\", {}).items())\n _update_names(param_names, exp.get(\"params\", {}).items())\n\n metric_names = _filter_names(\n metric_names,\n \"metrics\",\n kwargs.get(\"include_metrics\"),\n kwargs.get(\"exclude_metrics\"),\n )\n param_names = _filter_names(\n (param_names),\n \"params\",\n kwargs.get(\"include_params\"),\n kwargs.get(\"exclude_params\"),\n )\n\n return metric_names, param_names\n\n\ndef _collect_rows(\n base_rev,\n experiments,\n metric_names,\n param_names,\n precision=DEFAULT_PRECISION,\n no_timestamp=False,\n sort_by=None,\n sort_order=None,\n):\n from dvc.scm.git import Git\n\n if sort_by:\n sort_path, sort_name, sort_type = _sort_column(\n sort_by, metric_names, param_names\n )\n reverse = sort_order == \"desc\"\n experiments = _sort_exp(\n experiments, sort_path, sort_name, sort_type, reverse\n )\n\n new_checkpoint = True\n for i, (rev, exp) in 
enumerate(experiments.items()):\n row = []\n style = None\n queued = \"*\" if exp.get(\"queued\", False) else \"\"\n\n tip = exp.get(\"checkpoint_tip\")\n parent = \"\"\n if rev == \"baseline\":\n if Git.is_sha(base_rev):\n name_rev = base_rev[:7]\n else:\n name_rev = base_rev\n name = exp.get(\"name\", name_rev)\n row.append(f\"{name}\")\n style = \"bold\"\n else:\n if tip:\n parent_rev = exp.get(\"checkpoint_parent\", \"\")\n parent_exp = experiments.get(parent_rev, {})\n parent_tip = parent_exp.get(\"checkpoint_tip\")\n if tip == parent_tip:\n if new_checkpoint:\n tree = \"│ ╓\"\n else:\n tree = \"│ ╟\"\n new_checkpoint = False\n else:\n if parent_rev == base_rev:\n tree = \"├─╨\"\n else:\n tree = \"│ ╟\"\n parent = f\" ({parent_rev[:7]})\"\n new_checkpoint = True\n else:\n if i < len(experiments) - 1:\n tree = \"├──\"\n else:\n tree = \"└──\"\n new_checkpoint = True\n name = exp.get(\"name\", rev[:7])\n row.append(f\"{tree} {queued}{name}{parent}\")\n\n if not no_timestamp:\n row.append(_format_time(exp.get(\"timestamp\")))\n\n _extend_row(\n row, metric_names, exp.get(\"metrics\", {}).items(), precision\n )\n _extend_row(row, param_names, exp.get(\"params\", {}).items(), precision)\n\n yield row, style\n\n\ndef _sort_column(sort_by, metric_names, param_names):\n path, _, sort_name = sort_by.rpartition(\":\")\n matches = set()\n\n if path:\n if path in metric_names and sort_name in metric_names[path]:\n matches.add((path, sort_name, \"metrics\"))\n if path in param_names and sort_name in param_names[path]:\n matches.add((path, sort_name, \"params\"))\n else:\n for path in metric_names:\n if sort_name in metric_names[path]:\n matches.add((path, sort_name, \"metrics\"))\n for path in param_names:\n if sort_name in param_names[path]:\n matches.add((path, sort_name, \"params\"))\n\n if len(matches) == 1:\n return matches.pop()\n if len(matches) > 1:\n raise InvalidArgumentError(\n \"Ambiguous sort column '{}' matched '{}'\".format(\n sort_by,\n \", \".join([f\"{path}:{name}\" for path, name, _ in matches]),\n )\n )\n raise InvalidArgumentError(f\"Unknown sort column '{sort_by}'\")\n\n\ndef _sort_exp(experiments, sort_path, sort_name, typ, reverse):\n def _sort(item):\n rev, exp = item\n tip = exp.get(\"checkpoint_tip\")\n if tip and tip != rev:\n # Sort checkpoint experiments by tip commit\n return _sort((tip, experiments[tip]))\n data = exp.get(typ, {}).get(sort_path, {})\n val = flatten(data).get(sort_name)\n return (val is None, val)\n\n ret = OrderedDict()\n if \"baseline\" in experiments:\n ret[\"baseline\"] = experiments.pop(\"baseline\")\n\n ret.update(sorted(experiments.items(), key=_sort, reverse=reverse))\n return ret\n\n\ndef _format_time(timestamp):\n if timestamp is None:\n return \"-\"\n if timestamp.date() == date.today():\n fmt = \"%I:%M %p\"\n else:\n fmt = \"%b %d, %Y\"\n return timestamp.strftime(fmt)\n\n\ndef _format_field(val, precision=DEFAULT_PRECISION):\n if isinstance(val, float):\n fmt = f\"{{:.{precision}g}}\"\n return fmt.format(val)\n if isinstance(val, Mapping):\n return {k: _format_field(v) for k, v in val.items()}\n if isinstance(val, list):\n return [_format_field(x) for x in val]\n return str(val)\n\n\ndef _extend_row(row, names, items, precision):\n from rich.text import Text\n\n if not items:\n for keys in names.values():\n row.extend([\"-\"] * len(keys))\n return\n\n for fname, item in items:\n if isinstance(item, dict):\n item = flatten(item)\n else:\n item = {fname: item}\n for name in names[fname]:\n if name in item:\n value = item[name]\n if value 
is None:\n text = \"-\"\n else:\n # wrap field data in rich.Text, otherwise rich may\n # interpret unescaped braces from list/dict types as rich\n # markup tags\n text = Text(str(_format_field(value, precision)))\n row.append(text)\n else:\n row.append(\"-\")\n\n\ndef _parse_filter_list(param_list):\n ret = []\n for param_str in param_list:\n path, _, param_str = param_str.rpartition(\":\")\n if path:\n ret.extend(f\"{path}:{param}\" for param in param_str.split(\",\"))\n else:\n ret.extend(param_str.split(\",\"))\n return ret\n\n\ndef _experiments_table(all_experiments, **kwargs):\n from dvc.utils.table import Table\n\n include_metrics = _parse_filter_list(kwargs.pop(\"include_metrics\", []))\n exclude_metrics = _parse_filter_list(kwargs.pop(\"exclude_metrics\", []))\n include_params = _parse_filter_list(kwargs.pop(\"include_params\", []))\n exclude_params = _parse_filter_list(kwargs.pop(\"exclude_params\", []))\n\n metric_names, param_names = _collect_names(\n all_experiments,\n include_metrics=include_metrics,\n exclude_metrics=exclude_metrics,\n include_params=include_params,\n exclude_params=exclude_params,\n )\n\n table = Table()\n table.add_column(\n \"Experiment\", no_wrap=True, header_style=\"black on grey93\"\n )\n if not kwargs.get(\"no_timestamp\", False):\n table.add_column(\"Created\", header_style=\"black on grey93\")\n _add_data_columns(\n table,\n metric_names,\n justify=\"right\",\n no_wrap=True,\n header_style=\"black on cornsilk1\",\n )\n _add_data_columns(\n table, param_names, justify=\"left\", header_style=\"black on light_cyan1\"\n )\n\n for base_rev, experiments in all_experiments.items():\n for row, _, in _collect_rows(\n base_rev, experiments, metric_names, param_names, **kwargs,\n ):\n table.add_row(*row)\n\n return table\n\n\ndef _add_data_columns(table, names, **kwargs):\n count = Counter(\n name for path in names for name in names[path] for path in names\n )\n first = True\n for path in names:\n for name in names[path]:\n col_name = name if count[name] == 1 else f\"{path}:{name}\"\n kwargs[\"collapse\"] = False if first else True\n table.add_column(col_name, **kwargs)\n first = False\n\n\ndef _format_json(item):\n if isinstance(item, (date, datetime)):\n return item.isoformat()\n raise TypeError\n\n\nclass CmdExperimentsShow(CmdBase):\n def run(self):\n from rich.console import Console\n\n try:\n all_experiments = self.repo.experiments.show(\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n all_commits=self.args.all_commits,\n sha_only=self.args.sha,\n num=self.args.num,\n )\n\n if self.args.show_json:\n import json\n\n logger.info(json.dumps(all_experiments, default=_format_json))\n return 0\n\n if self.args.precision is None:\n precision = DEFAULT_PRECISION\n else:\n precision = self.args.precision\n\n table = _experiments_table(\n all_experiments,\n include_metrics=self.args.include_metrics,\n exclude_metrics=self.args.exclude_metrics,\n include_params=self.args.include_params,\n exclude_params=self.args.exclude_params,\n no_timestamp=self.args.no_timestamp,\n sort_by=self.args.sort_by,\n sort_order=self.args.sort_order,\n precision=precision,\n )\n\n console = Console()\n if self.args.no_pager:\n console.print(table)\n else:\n from dvc.utils.pager import DvcPager\n\n # NOTE: rich does not have native support for unlimited width\n # via pager. 
we override rich table compression by setting\n # console width to the full width of the table\n console_options = console.options\n console_options.max_width = SHOW_MAX_WIDTH\n measurement = table.__rich_measure__(console, console_options)\n console._width = ( # pylint: disable=protected-access\n measurement.maximum\n )\n with console.pager(pager=DvcPager(), styles=True):\n console.print(table)\n\n except DvcException:\n logger.exception(\"failed to show experiments\")\n return 1\n\n return 0\n\n\nclass CmdExperimentsApply(CmdBase):\n def run(self):\n\n self.repo.experiments.apply(\n self.args.experiment, force=self.args.force\n )\n\n return 0\n\n\ndef _show_diff(\n diff,\n title=\"\",\n markdown=False,\n no_path=False,\n old=False,\n precision=DEFAULT_PRECISION,\n):\n from dvc.utils.diff import table\n\n rows = []\n for fname, diff_ in diff.items():\n sorted_diff = OrderedDict(sorted(diff_.items()))\n for item, change in sorted_diff.items():\n row = [] if no_path else [fname]\n row.append(item)\n if old:\n row.append(_format_field(change.get(\"old\"), precision))\n row.append(_format_field(change[\"new\"], precision))\n row.append(\n _format_field(\n change.get(\"diff\", \"diff not supported\"), precision\n )\n )\n rows.append(row)\n\n header = [] if no_path else [\"Path\"]\n header.append(title)\n if old:\n header.extend([\"Old\", \"New\"])\n else:\n header.append(\"Value\")\n header.append(\"Change\")\n\n return table(header, rows, markdown)\n\n\nclass CmdExperimentsDiff(CmdBase):\n def run(self):\n\n try:\n diff = self.repo.experiments.diff(\n a_rev=self.args.a_rev,\n b_rev=self.args.b_rev,\n all=self.args.all,\n )\n\n if self.args.show_json:\n import json\n\n logger.info(json.dumps(diff))\n else:\n if self.args.precision is None:\n precision = DEFAULT_PRECISION\n else:\n precision = self.args.precision\n\n diffs = [(\"metrics\", \"Metric\"), (\"params\", \"Param\")]\n for key, title in diffs:\n table = _show_diff(\n diff[key],\n title=title,\n markdown=self.args.show_md,\n no_path=self.args.no_path,\n old=self.args.old,\n precision=precision,\n )\n if table:\n logger.info(table)\n logger.info(\"\")\n\n except DvcException:\n logger.exception(\"failed to show experiments diff\")\n return 1\n\n return 0\n\n\nclass CmdExperimentsRun(CmdRepro):\n def run(self):\n from dvc.command.metrics import _show_metrics\n\n if self.args.reset and self.args.checkpoint_resume:\n raise InvalidArgumentError(\n \"--reset and --rev are mutually exclusive.\"\n )\n\n if self.args.reset:\n logger.info(\"Any existing checkpoints will be reset and re-run.\")\n\n results = self.repo.experiments.run(\n name=self.args.name,\n queue=self.args.queue,\n run_all=self.args.run_all,\n jobs=self.args.jobs,\n params=self.args.set_param,\n checkpoint_resume=self.args.checkpoint_resume,\n reset=self.args.reset,\n tmp_dir=self.args.tmp_dir,\n **self._repro_kwargs,\n )\n\n if self.args.metrics and results:\n metrics = self.repo.metrics.show(revs=list(results))\n metrics.pop(\"workspace\", None)\n logger.info(_show_metrics(metrics))\n\n return 0\n\n\ndef _raise_error_if_all_disabled(**kwargs):\n if not any(kwargs.values()):\n raise InvalidArgumentError(\n \"Either of `-w|--workspace`, `-a|--all-branches`, `-T|--all-tags` \"\n \"or `--all-commits` needs to be set.\"\n )\n\n\nclass CmdExperimentsGC(CmdRepro):\n def run(self):\n _raise_error_if_all_disabled(\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n all_commits=self.args.all_commits,\n workspace=self.args.workspace,\n )\n\n msg = \"This will remove 
all experiments except those derived from \"\n\n msg += \"the workspace\"\n if self.args.all_commits:\n msg += \" and all git commits\"\n elif self.args.all_branches and self.args.all_tags:\n msg += \" and all git branches and tags\"\n elif self.args.all_branches:\n msg += \" and all git branches\"\n elif self.args.all_tags:\n msg += \" and all git tags\"\n msg += \" of the current repo.\"\n if self.args.queued:\n msg += \" Run queued experiments will be preserved.\"\n if self.args.queued:\n msg += \" Run queued experiments will be removed.\"\n\n logger.warning(msg)\n\n msg = \"Are you sure you want to proceed?\"\n if not self.args.force and not prompt.confirm(msg):\n return 1\n\n removed = self.repo.experiments.gc(\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n all_commits=self.args.all_commits,\n workspace=self.args.workspace,\n queued=self.args.queued,\n )\n\n if removed:\n logger.info(\n f\"Removed {removed} experiments. To remove unused cache files \"\n \"use 'dvc gc'.\"\n )\n else:\n logger.info(\"No experiments to remove.\")\n return 0\n\n\nclass CmdExperimentsBranch(CmdBase):\n def run(self):\n\n self.repo.experiments.branch(self.args.experiment, self.args.branch)\n\n return 0\n\n\nclass CmdExperimentsList(CmdBase):\n def run(self):\n names_only = self.args.names_only\n exps = self.repo.experiments.ls(\n rev=self.args.rev,\n git_remote=self.args.git_remote,\n all_=self.args.all,\n )\n for baseline in exps:\n tag = self.repo.scm.describe(baseline)\n if not tag:\n branch = self.repo.scm.describe(baseline, base=\"refs/heads\")\n if branch:\n tag = branch.split(\"/\")[-1]\n name = tag if tag else baseline[:7]\n if not names_only:\n print(f\"{name}:\")\n for exp_name in exps[baseline]:\n indent = \"\" if names_only else \"\\t\"\n print(f\"{indent}{exp_name}\")\n\n return 0\n\n\nclass CmdExperimentsPush(CmdBase):\n def run(self):\n\n self.repo.experiments.push(\n self.args.git_remote,\n self.args.experiment,\n force=self.args.force,\n push_cache=self.args.push_cache,\n dvc_remote=self.args.dvc_remote,\n jobs=self.args.jobs,\n run_cache=self.args.run_cache,\n )\n\n logger.info(\n \"Pushed experiment '%s' to Git remote '%s'.\",\n self.args.experiment,\n self.args.git_remote,\n )\n if not self.args.push_cache:\n logger.info(\n \"To push cached outputs for this experiment to DVC remote \"\n \"storage, re-run this command without '--no-cache'.\"\n )\n\n return 0\n\n\nclass CmdExperimentsPull(CmdBase):\n def run(self):\n\n self.repo.experiments.pull(\n self.args.git_remote,\n self.args.experiment,\n force=self.args.force,\n pull_cache=self.args.pull_cache,\n dvc_remote=self.args.dvc_remote,\n jobs=self.args.jobs,\n run_cache=self.args.run_cache,\n )\n\n logger.info(\n \"Pulled experiment '%s' from Git remote '%s'. 
\",\n self.args.experiment,\n self.args.git_remote,\n )\n if not self.args.pull_cache:\n logger.info(\n \"To pull cached outputs for this experiment from DVC remote \"\n \"storage, re-run this command without '--no-cache'.\"\n )\n\n return 0\n\n\nclass CmdExperimentsRemove(CmdBase):\n def run(self):\n\n self.repo.experiments.remove(\n exp_names=self.args.experiment, queue=self.args.queue,\n )\n\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n EXPERIMENTS_HELP = \"Commands to run and compare experiments.\"\n\n experiments_parser = subparsers.add_parser(\n \"experiments\",\n parents=[parent_parser],\n aliases=[\"exp\"],\n description=append_doc_link(EXPERIMENTS_HELP, \"exp\"),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help=EXPERIMENTS_HELP,\n )\n\n experiments_subparsers = experiments_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc experiments CMD --help` to display \"\n \"command-specific help.\",\n )\n\n fix_subparsers(experiments_subparsers)\n\n EXPERIMENTS_SHOW_HELP = \"Print experiments.\"\n experiments_show_parser = experiments_subparsers.add_parser(\n \"show\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_SHOW_HELP, \"exp/show\"),\n help=EXPERIMENTS_SHOW_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_show_parser.add_argument(\n \"-a\",\n \"--all-branches\",\n action=\"store_true\",\n default=False,\n help=\"Show experiments derived from the tip of all Git branches.\",\n )\n experiments_show_parser.add_argument(\n \"-T\",\n \"--all-tags\",\n action=\"store_true\",\n default=False,\n help=\"Show experiments derived from all Git tags.\",\n )\n experiments_show_parser.add_argument(\n \"-A\",\n \"--all-commits\",\n action=\"store_true\",\n default=False,\n help=\"Show experiments derived from all Git commits.\",\n )\n experiments_show_parser.add_argument(\n \"-n\",\n \"--num\",\n type=int,\n default=1,\n dest=\"num\",\n metavar=\"<num>\",\n help=\"Show the last `num` commits from HEAD.\",\n )\n experiments_show_parser.add_argument(\n \"--no-pager\",\n action=\"store_true\",\n default=False,\n help=\"Do not pipe output into a pager.\",\n )\n experiments_show_parser.add_argument(\n \"--include-metrics\",\n action=\"append\",\n default=[],\n help=\"Include the specified metrics in output table.\",\n metavar=\"<metrics_list>\",\n )\n experiments_show_parser.add_argument(\n \"--exclude-metrics\",\n action=\"append\",\n default=[],\n help=\"Exclude the specified metrics from output table.\",\n metavar=\"<metrics_list>\",\n )\n experiments_show_parser.add_argument(\n \"--include-params\",\n action=\"append\",\n default=[],\n help=\"Include the specified params in output table.\",\n metavar=\"<params_list>\",\n )\n experiments_show_parser.add_argument(\n \"--exclude-params\",\n action=\"append\",\n default=[],\n help=\"Exclude the specified params from output table.\",\n metavar=\"<params_list>\",\n )\n experiments_show_parser.add_argument(\n \"--sort-by\",\n help=\"Sort related experiments by the specified metric or param.\",\n metavar=\"<metric/param>\",\n )\n experiments_show_parser.add_argument(\n \"--sort-order\",\n help=\"Sort order to use with --sort-by.\",\n choices=(\"asc\", \"desc\"),\n default=\"asc\",\n )\n experiments_show_parser.add_argument(\n \"--no-timestamp\",\n action=\"store_true\",\n default=False,\n help=\"Do not show experiment timestamps.\",\n )\n experiments_show_parser.add_argument(\n \"--sha\",\n action=\"store_true\",\n default=False,\n help=\"Always show git commit SHAs 
instead of branch/tag names.\",\n )\n experiments_show_parser.add_argument(\n \"--show-json\",\n action=\"store_true\",\n default=False,\n help=\"Print output in JSON format instead of a human-readable table.\",\n )\n experiments_show_parser.add_argument(\n \"--precision\",\n type=int,\n help=(\n \"Round metrics/params to `n` digits precision after the decimal \"\n f\"point. Rounds to {DEFAULT_PRECISION} digits by default.\"\n ),\n metavar=\"<n>\",\n )\n experiments_show_parser.set_defaults(func=CmdExperimentsShow)\n\n EXPERIMENTS_APPLY_HELP = (\n \"Apply the changes from an experiment to your workspace.\"\n )\n experiments_apply_parser = experiments_subparsers.add_parser(\n \"apply\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_APPLY_HELP, \"exp/apply\"),\n help=EXPERIMENTS_APPLY_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_apply_parser.add_argument(\n \"--no-force\",\n action=\"store_false\",\n dest=\"force\",\n help=\"Fail if this command would overwrite conflicting changes.\",\n )\n experiments_apply_parser.add_argument(\n \"experiment\", help=\"Experiment to be applied.\",\n ).complete = completion.EXPERIMENT\n experiments_apply_parser.set_defaults(func=CmdExperimentsApply)\n\n EXPERIMENTS_DIFF_HELP = (\n \"Show changes between experiments in the DVC repository.\"\n )\n experiments_diff_parser = experiments_subparsers.add_parser(\n \"diff\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_DIFF_HELP, \"exp/diff\"),\n help=EXPERIMENTS_DIFF_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_diff_parser.add_argument(\n \"a_rev\", nargs=\"?\", help=\"Old experiment to compare (defaults to HEAD)\"\n ).complete = completion.EXPERIMENT\n experiments_diff_parser.add_argument(\n \"b_rev\",\n nargs=\"?\",\n help=\"New experiment to compare (defaults to the current workspace)\",\n ).complete = completion.EXPERIMENT\n experiments_diff_parser.add_argument(\n \"--all\",\n action=\"store_true\",\n default=False,\n help=\"Show unchanged metrics/params as well.\",\n )\n experiments_diff_parser.add_argument(\n \"--show-json\",\n action=\"store_true\",\n default=False,\n help=\"Show output in JSON format.\",\n )\n experiments_diff_parser.add_argument(\n \"--show-md\",\n action=\"store_true\",\n default=False,\n help=\"Show tabulated output in the Markdown format (GFM).\",\n )\n experiments_diff_parser.add_argument(\n \"--old\",\n action=\"store_true\",\n default=False,\n help=\"Show old metric/param value.\",\n )\n experiments_diff_parser.add_argument(\n \"--no-path\",\n action=\"store_true\",\n default=False,\n help=\"Don't show metric/param path.\",\n )\n experiments_diff_parser.add_argument(\n \"--precision\",\n type=int,\n help=(\n \"Round metrics/params to `n` digits precision after the decimal \"\n f\"point. Rounds to {DEFAULT_PRECISION} digits by default.\"\n ),\n metavar=\"<n>\",\n )\n experiments_diff_parser.set_defaults(func=CmdExperimentsDiff)\n\n EXPERIMENTS_RUN_HELP = (\n \"Reproduce complete or partial experiment pipelines.\"\n )\n experiments_run_parser = experiments_subparsers.add_parser(\n \"run\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_RUN_HELP, \"exp/run\"),\n help=EXPERIMENTS_RUN_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n _add_run_common(experiments_run_parser)\n experiments_run_parser.add_argument(\n \"-r\",\n \"--rev\",\n type=str,\n dest=\"checkpoint_resume\",\n help=(\n \"Continue the specified checkpoint experiment. 
\"\n \"(Only required for explicitly resuming checkpoints in queued \"\n \"or temp dir runs.)\"\n ),\n metavar=\"<experiment_rev>\",\n ).complete = completion.EXPERIMENT\n experiments_run_parser.add_argument(\n \"--reset\",\n action=\"store_true\",\n help=\"Reset existing checkpoints and restart the experiment.\",\n )\n experiments_run_parser.set_defaults(func=CmdExperimentsRun)\n\n EXPERIMENTS_GC_HELP = \"Garbage collect unneeded experiments.\"\n EXPERIMENTS_GC_DESCRIPTION = (\n \"Removes all experiments which are not derived from the specified\"\n \"Git revisions.\"\n )\n experiments_gc_parser = experiments_subparsers.add_parser(\n \"gc\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_GC_DESCRIPTION, \"exp/gc\"),\n help=EXPERIMENTS_GC_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_gc_parser.add_argument(\n \"-w\",\n \"--workspace\",\n action=\"store_true\",\n default=False,\n help=\"Keep experiments derived from the current workspace.\",\n )\n experiments_gc_parser.add_argument(\n \"-a\",\n \"--all-branches\",\n action=\"store_true\",\n default=False,\n help=\"Keep experiments derived from the tips of all Git branches.\",\n )\n experiments_gc_parser.add_argument(\n \"-T\",\n \"--all-tags\",\n action=\"store_true\",\n default=False,\n help=\"Keep experiments derived from all Git tags.\",\n )\n experiments_gc_parser.add_argument(\n \"--all-commits\",\n action=\"store_true\",\n default=False,\n help=\"Keep experiments derived from all Git commits.\",\n )\n experiments_gc_parser.add_argument(\n \"--queued\",\n action=\"store_true\",\n default=False,\n help=(\n \"Keep queued experiments (experiments run queue will be cleared \"\n \"by default).\"\n ),\n )\n experiments_gc_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Force garbage collection - automatically agree to all prompts.\",\n )\n experiments_gc_parser.set_defaults(func=CmdExperimentsGC)\n\n EXPERIMENTS_BRANCH_HELP = \"Promote an experiment to a Git branch.\"\n experiments_branch_parser = experiments_subparsers.add_parser(\n \"branch\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_BRANCH_HELP, \"exp/branch\"),\n help=EXPERIMENTS_BRANCH_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_branch_parser.add_argument(\n \"experiment\", help=\"Experiment to be promoted.\",\n )\n experiments_branch_parser.add_argument(\n \"branch\", help=\"Git branch name to use.\",\n )\n experiments_branch_parser.set_defaults(func=CmdExperimentsBranch)\n\n EXPERIMENTS_LIST_HELP = \"List local and remote experiments.\"\n experiments_list_parser = experiments_subparsers.add_parser(\n \"list\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_LIST_HELP, \"exp/list\"),\n help=EXPERIMENTS_LIST_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_list_parser.add_argument(\n \"--rev\",\n type=str,\n default=None,\n help=(\n \"List experiments derived from the specified revision. 
\"\n \"Defaults to HEAD if neither `--rev` nor `--all` are specified.\"\n ),\n metavar=\"<rev>\",\n )\n experiments_list_parser.add_argument(\n \"--all\", action=\"store_true\", help=\"List all experiments.\",\n )\n experiments_list_parser.add_argument(\n \"--names-only\",\n action=\"store_true\",\n help=\"Only output experiment names (without parent commits).\",\n )\n experiments_list_parser.add_argument(\n \"git_remote\",\n nargs=\"?\",\n default=None,\n help=(\n \"Optional Git remote name or Git URL. If provided, experiments \"\n \"from the specified Git repository will be listed instead of \"\n \"local experiments.\"\n ),\n metavar=\"[<git_remote>]\",\n )\n experiments_list_parser.set_defaults(func=CmdExperimentsList)\n\n EXPERIMENTS_PUSH_HELP = \"Push a local experiment to a Git remote.\"\n experiments_push_parser = experiments_subparsers.add_parser(\n \"push\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_PUSH_HELP, \"exp/push\"),\n help=EXPERIMENTS_PUSH_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_push_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n help=\"Replace experiment in the Git remote if it already exists.\",\n )\n experiments_push_parser.add_argument(\n \"--no-cache\",\n action=\"store_false\",\n dest=\"push_cache\",\n help=(\n \"Do not push cached outputs for this experiment to DVC remote \"\n \"storage.\"\n ),\n )\n experiments_push_parser.add_argument(\n \"-r\",\n \"--remote\",\n dest=\"dvc_remote\",\n metavar=\"<name>\",\n help=\"Name of the DVC remote to use when pushing cached outputs.\",\n )\n experiments_push_parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n metavar=\"<number>\",\n help=(\n \"Number of jobs to run simultaneously when pushing to DVC remote \"\n \"storage.\"\n ),\n )\n experiments_push_parser.add_argument(\n \"--run-cache\",\n action=\"store_true\",\n default=False,\n help=\"Push run history for all stages.\",\n )\n experiments_push_parser.add_argument(\n \"git_remote\",\n help=\"Git remote name or Git URL.\",\n metavar=\"<git_remote>\",\n )\n experiments_push_parser.add_argument(\n \"experiment\", help=\"Experiment to push.\", metavar=\"<experiment>\",\n ).complete = completion.EXPERIMENT\n experiments_push_parser.set_defaults(func=CmdExperimentsPush)\n\n EXPERIMENTS_PULL_HELP = \"Pull an experiment from a Git remote.\"\n experiments_pull_parser = experiments_subparsers.add_parser(\n \"pull\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_PULL_HELP, \"exp/pull\"),\n help=EXPERIMENTS_PULL_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_pull_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n help=\"Replace local experiment already exists.\",\n )\n experiments_pull_parser.add_argument(\n \"--no-cache\",\n action=\"store_false\",\n dest=\"pull_cache\",\n help=(\n \"Do not pull cached outputs for this experiment from DVC remote \"\n \"storage.\"\n ),\n )\n experiments_pull_parser.add_argument(\n \"-r\",\n \"--remote\",\n dest=\"dvc_remote\",\n metavar=\"<name>\",\n help=\"Name of the DVC remote to use when pulling cached outputs.\",\n )\n experiments_pull_parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n metavar=\"<number>\",\n help=(\n \"Number of jobs to run simultaneously when pulling from DVC \"\n \"remote storage.\"\n ),\n )\n experiments_pull_parser.add_argument(\n \"--run-cache\",\n action=\"store_true\",\n default=False,\n help=\"Pull run history for all stages.\",\n )\n 
experiments_pull_parser.add_argument(\n \"git_remote\",\n help=\"Git remote name or Git URL.\",\n metavar=\"<git_remote>\",\n )\n experiments_pull_parser.add_argument(\n \"experiment\", help=\"Experiment to pull.\", metavar=\"<experiment>\",\n )\n experiments_pull_parser.set_defaults(func=CmdExperimentsPull)\n\n EXPERIMENTS_REMOVE_HELP = \"Remove local experiments.\"\n experiments_remove_parser = experiments_subparsers.add_parser(\n \"remove\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_REMOVE_HELP, \"exp/remove\"),\n help=EXPERIMENTS_REMOVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n experiments_remove_parser.add_argument(\n \"--queue\", action=\"store_true\", help=\"Remove all queued experiments.\",\n )\n experiments_remove_parser.add_argument(\n \"experiment\",\n nargs=\"*\",\n help=\"Experiments to remove.\",\n metavar=\"<experiment>\",\n )\n experiments_remove_parser.set_defaults(func=CmdExperimentsRemove)\n\n\ndef _add_run_common(parser):\n \"\"\"Add common args for 'exp run' and 'exp resume'.\"\"\"\n # inherit arguments from `dvc repro`\n add_repro_arguments(parser)\n parser.add_argument(\n \"-n\",\n \"--name\",\n default=None,\n help=(\n \"Human-readable experiment name. If not specified, a name will \"\n \"be auto-generated.\"\n ),\n metavar=\"<name>\",\n )\n parser.add_argument(\n \"-S\",\n \"--set-param\",\n action=\"append\",\n default=[],\n help=\"Use the specified param value when reproducing pipelines.\",\n metavar=\"[<filename>:]<param_name>=<param_value>\",\n )\n parser.add_argument(\n \"--queue\",\n action=\"store_true\",\n default=False,\n help=\"Stage this experiment in the run queue for future execution.\",\n )\n parser.add_argument(\n \"--run-all\",\n action=\"store_true\",\n default=False,\n help=\"Execute all experiments in the run queue. Implies --temp.\",\n )\n parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n help=\"Run the specified number of experiments at a time in parallel.\",\n metavar=\"<number>\",\n )\n parser.add_argument(\n \"--temp\",\n action=\"store_true\",\n dest=\"tmp_dir\",\n help=(\n \"Run this experiment in a separate temporary directory instead of \"\n \"your workspace.\"\n ),\n )\n", "path": "dvc/command/experiments.py" } ]
diff --git a/dvc/command/experiments.py b/dvc/command/experiments.py
index b8180a1c9d..90d860c8c2 100644
--- a/dvc/command/experiments.py
+++ b/dvc/command/experiments.py
@@ -268,7 +268,8 @@ def _extend_row(row, names, items, precision):
     from rich.text import Text
 
     if not items:
-        row.extend(["-"] * len(names))
+        for keys in names.values():
+            row.extend(["-"] * len(keys))
         return
 
     for fname, item in items:
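The one-hunk diff above changes how `_extend_row` pads a row when an experiment reports no metrics or params: `names` maps each file path to its own dict of column names, so the number of `-` placeholders has to be the total number of keys across all paths, not the number of paths. A minimal sketch of the difference (the sample `names` dict is invented for illustration):

```python
# Hypothetical `names` mapping: two files, three columns in total.
names = {
    "metrics.json": {"acc": None, "loss": None},
    "summary.json": {"f1": None},
}

# Old padding: one "-" per file path -> only 2 placeholders for 3 columns.
old_row = ["-"] * len(names)

# Fixed padding: one "-" per column name under every path -> 3 placeholders.
new_row = []
for keys in names.values():
    new_row.extend(["-"] * len(keys))

print(old_row)  # ['-', '-']
print(new_row)  # ['-', '-', '-']
```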
ManimCommunity__manim-1879
v0.9.0 is installed but poetry shell says: You are using manim version v0.6.0, but version v0.9.0 is available. ## Description of bug / unexpected behavior <!-- Add a clear and concise description of the problem you encountered. --> v0.9.0 is installed but poetry shell says: `You are using manim version v0.6.0, but version v0.9.0 is available.` This isn't a show stopping issue but it is annoying and has been around for several releases. ## Expected behavior <!-- Add a clear and concise description of what you expected to happen. --> The correct version would be displayed. ## How to reproduce the issue <!-- Provide a piece of code illustrating the undesired behavior. --> <details><summary>Code for reproducing the problem</summary> ```py Paste your code here. ``` </details> ## Additional media files <!-- Paste in the files manim produced on rendering the code above. --> <details><summary>Images/GIFs</summary> <!-- PASTE MEDIA HERE --> </details> ## Logs <details><summary>Terminal output</summary> <!-- Add "-v DEBUG" when calling manim to generate more detailed logs --> ``` PASTE HERE OR PROVIDE LINK TO https://pastebin.com/ OR SIMILAR ``` <!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) --> </details> ## System specifications <details><summary>System Details</summary> - OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)): Ubuntu 20.04 - RAM: 16GB - Python version (`python/py/python3 --version`): 3.8.10 - Installed modules (provide output from `pip list`): ``` Package Version --------------------------------- ------------ alabaster 0.7.12 anyio 3.2.1 apipkg 1.5 appdirs 1.4.4 argon2-cffi 20.1.0 astroid 2.6.2 async-generator 1.10 attrs 21.2.0 Babel 2.9.1 backcall 0.2.0 backports.entry-points-selectable 1.1.0 beautifulsoup4 4.9.3 black 20.8b1 bleach 3.3.0 certifi 2021.5.30 cffi 1.14.6 cfgv 3.3.0 chardet 4.0.0 charset-normalizer 2.0.1 click 8.0.1 click-default-group 1.2.2 cloup 0.7.1 colorama 0.4.4 colour 0.1.5 commonmark 0.9.1 coverage 5.5 cycler 0.10.0 dearpygui 0.8.31 debugpy 1.3.0 decorator 5.0.9 defusedxml 0.7.1 Deprecated 1.2.12 distlib 0.3.2 docutils 0.16 entrypoints 0.3 execnet 1.9.0 filelock 3.0.12 flake8 3.9.2 flake8-bugbear 21.4.3 flake8-builtins 1.5.3 flake8-comprehensions 3.4.0 flake8-docstrings 1.6.0 flake8-logging-format 0.6.0 flake8-plugin-utils 1.3.1 flake8-pytest-style 1.4.1 flake8-rst-docstrings 0.0.14 furo 2021.6.18b36 gitdb 4.0.7 GitPython 3.1.18 glcontext 2.3.4 guzzle-sphinx-theme 0.7.11 identify 2.2.11 idna 3.2 imagesize 1.2.0 iniconfig 1.1.1 ipykernel 5.5.5 ipython 7.25.0 ipython-genutils 0.2.0 isort 5.9.2 jedi 0.18.0 Jinja2 3.0.1 json5 0.9.6 jsonschema 3.2.0 jupyter-client 6.2.0 jupyter-core 4.7.1 jupyter-server 1.9.0 jupyterlab 3.0.16 jupyterlab-pygments 0.1.2 jupyterlab-server 2.6.1 kiwisolver 1.3.1 lazy-object-proxy 1.6.0 manim 0.9.0 ManimPango 0.3.0 mapbox-earcut 0.12.10 MarkupSafe 2.0.1 matplotlib 3.4.2 matplotlib-inline 0.1.2 mccabe 0.6.1 mistune 0.8.4 moderngl 5.6.4 moderngl-window 2.4.0 mpmath 1.2.1 multipledispatch 0.6.0 mypy-extensions 0.4.3 nbclassic 0.3.1 nbclient 0.5.3 nbconvert 6.1.0 nbformat 5.1.3 nest-asyncio 1.5.1 networkx 2.6.1 nodeenv 1.6.0 notebook 6.4.0 numpy 1.21.0 packaging 21.0 pandas 1.1.5 pandocfilters 1.4.3 parso 0.8.2 pathspec 0.8.1 pexpect 4.8.0 pickleshare 0.7.5 Pillow 8.2.0 pip 21.0.1 platformdirs 2.0.2 pluggy 0.13.1 pre-commit 2.13.0 prometheus-client 0.11.0 prompt-toolkit 3.0.19 psutil 5.8.0 ptyprocess 0.7.0 py 1.10.0 pycairo 1.20.1 pycodestyle 2.7.0 pycparser 2.20 pydocstyle 6.0.0 
pydub 0.25.1 pyflakes 2.3.1 PyGithub 1.55 pyglet 1.5.18 Pygments 2.9.0 PyJWT 2.1.0 pylint 2.9.3 PyNaCl 1.4.0 pyparsing 2.4.7 pyrr 0.10.3 pyrsistent 0.18.0 pytest 6.2.4 pytest-cov 2.12.1 pytest-forked 1.3.0 pytest-xdist 2.3.0 python-dateutil 2.8.1 pytz 2021.1 PyYAML 5.4.1 pyzmq 22.1.0 recommonmark 0.7.1 regex 2021.7.6 requests 2.26.0 requests-unixsocket 0.2.0 restructuredtext-lint 1.3.2 rich 10.6.0 scipy 1.6.0 screeninfo 0.6.7 Send2Trash 1.7.1 setuptools 52.0.0 six 1.16.0 smmap 4.0.0 sniffio 1.2.0 snowballstemmer 2.1.0 soupsieve 2.2.1 Sphinx 3.5.4 sphinx-copybutton 0.4.0 sphinxcontrib-applehelp 1.0.2 sphinxcontrib-devhelp 1.0.2 sphinxcontrib-htmlhelp 2.0.0 sphinxcontrib-jsmath 1.0.1 sphinxcontrib-qthelp 1.0.3 sphinxcontrib-serializinghtml 1.1.5 sphinxext-opengraph 0.4.2 terminado 0.10.1 testpath 0.5.0 toml 0.10.2 tornado 6.1 tqdm 4.61.2 traitlets 5.0.5 typed-ast 1.4.3 typing-extensions 3.10.0.0 urllib3 1.26.6 virtualenv 20.5.0 watchdog 2.1.3 wcwidth 0.2.5 webencodings 0.5.1 websocket-client 1.1.0 wheel 0.36.2 wrapt 1.12.1 ``` </details> <details><summary>LaTeX details</summary> + LaTeX distribution (e.g. TeX Live 2020): + Installed LaTeX packages: <!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX --> </details> <details><summary>FFMPEG</summary> Output of `ffmpeg -version`: ``` PASTE HERE ``` </details> ## Additional comments <!-- Add further context that you think might be relevant for this issue here. -->
[ { "content": "#!/usr/bin/env python\n\n# flake8: noqa\n\ntry:\n import importlib.metadata as importlib_metadata\nexcept ModuleNotFoundError:\n import importlib_metadata\n\n__version__ = importlib_metadata.version(__name__)\n\n\nimport sys\n\n# Importing the config module should be the first thing we do, since other\n# modules depend on the global config dict for initialization.\nfrom ._config import *\n\n# Workaround to set the renderer passed via CLI args *before* importing\n# Manim's classes (as long as the metaclass approach for switching\n# between OpenGL and cairo rendering is in place, classes depend\n# on the value of config.renderer).\nfor i, arg in enumerate(sys.argv):\n if arg.startswith(\"--renderer\"):\n if \"=\" in arg:\n _, parsed_renderer = arg.split(\"=\")\n else:\n parsed_renderer = sys.argv[i + 1]\n config.renderer = parsed_renderer\n elif arg == \"--use_opengl_renderer\":\n config.renderer = \"opengl\"\n elif arg == \"--use_webgl_renderer\":\n config.renderer = \"webgl\"\n\n\nfrom .animation.animation import *\nfrom .animation.composition import *\nfrom .animation.creation import *\nfrom .animation.fading import *\nfrom .animation.growing import *\nfrom .animation.indication import *\nfrom .animation.movement import *\nfrom .animation.numbers import *\nfrom .animation.rotation import *\nfrom .animation.specialized import *\nfrom .animation.transform import *\nfrom .animation.transform_matching_parts import *\nfrom .animation.update import *\nfrom .camera.camera import *\nfrom .camera.mapping_camera import *\nfrom .camera.moving_camera import *\nfrom .camera.multi_camera import *\nfrom .camera.three_d_camera import *\nfrom .constants import *\nfrom .mobject.changing import *\nfrom .mobject.coordinate_systems import *\nfrom .mobject.frame import *\nfrom .mobject.functions import *\nfrom .mobject.geometry import *\nfrom .mobject.graph import *\nfrom .mobject.logo import *\nfrom .mobject.matrix import *\nfrom .mobject.mobject import *\nfrom .mobject.mobject_update_utils import *\nfrom .mobject.number_line import *\nfrom .mobject.numbers import *\nfrom .mobject.polyhedra import *\nfrom .mobject.probability import *\nfrom .mobject.shape_matchers import *\nfrom .mobject.svg.brace import *\nfrom .mobject.svg.code_mobject import *\nfrom .mobject.svg.style_utils import *\nfrom .mobject.svg.svg_mobject import *\nfrom .mobject.svg.svg_path import *\nfrom .mobject.svg.tex_mobject import *\nfrom .mobject.svg.text_mobject import *\nfrom .mobject.table import *\nfrom .mobject.three_d_utils import *\nfrom .mobject.three_dimensions import *\nfrom .mobject.types.image_mobject import *\nfrom .mobject.types.point_cloud_mobject import *\nfrom .mobject.types.vectorized_mobject import *\nfrom .mobject.value_tracker import *\nfrom .mobject.vector_field import *\nfrom .renderer.cairo_renderer import *\nfrom .scene.moving_camera_scene import *\nfrom .scene.reconfigurable_scene import *\nfrom .scene.sample_space_scene import *\nfrom .scene.scene import *\nfrom .scene.scene_file_writer import *\nfrom .scene.three_d_scene import *\nfrom .scene.vector_space_scene import *\nfrom .scene.zoomed_scene import *\nfrom .utils import color as color\nfrom .utils import rate_functions, unit\nfrom .utils.bezier import *\nfrom .utils.color import *\nfrom .utils.config_ops import *\nfrom .utils.debug import *\nfrom .utils.file_ops import *\nfrom .utils.images import *\nfrom .utils.iterables import *\nfrom .utils.paths import *\nfrom .utils.rate_functions import *\nfrom .utils.simple_functions import *\nfrom 
.utils.sounds import *\nfrom .utils.space_ops import *\nfrom .utils.strings import *\nfrom .utils.tex import *\nfrom .utils.tex_templates import *\n\ntry:\n from IPython import get_ipython\n\n from .utils.ipython_magic import ManimMagic\nexcept ImportError:\n pass\nelse:\n ipy = get_ipython()\n if ipy is not None:\n ipy.register_magics(ManimMagic)\n\nfrom .plugins import *\n", "path": "manim/__init__.py" } ]
[ { "content": "#!/usr/bin/env python\n\n# flake8: noqa\n\nimport pkg_resources\n\n__version__ = pkg_resources.get_distribution(__name__).version\n\n\nimport sys\n\n# Importing the config module should be the first thing we do, since other\n# modules depend on the global config dict for initialization.\nfrom ._config import *\n\n# Workaround to set the renderer passed via CLI args *before* importing\n# Manim's classes (as long as the metaclass approach for switching\n# between OpenGL and cairo rendering is in place, classes depend\n# on the value of config.renderer).\nfor i, arg in enumerate(sys.argv):\n if arg.startswith(\"--renderer\"):\n if \"=\" in arg:\n _, parsed_renderer = arg.split(\"=\")\n else:\n parsed_renderer = sys.argv[i + 1]\n config.renderer = parsed_renderer\n elif arg == \"--use_opengl_renderer\":\n config.renderer = \"opengl\"\n elif arg == \"--use_webgl_renderer\":\n config.renderer = \"webgl\"\n\n\nfrom .animation.animation import *\nfrom .animation.composition import *\nfrom .animation.creation import *\nfrom .animation.fading import *\nfrom .animation.growing import *\nfrom .animation.indication import *\nfrom .animation.movement import *\nfrom .animation.numbers import *\nfrom .animation.rotation import *\nfrom .animation.specialized import *\nfrom .animation.transform import *\nfrom .animation.transform_matching_parts import *\nfrom .animation.update import *\nfrom .camera.camera import *\nfrom .camera.mapping_camera import *\nfrom .camera.moving_camera import *\nfrom .camera.multi_camera import *\nfrom .camera.three_d_camera import *\nfrom .constants import *\nfrom .mobject.changing import *\nfrom .mobject.coordinate_systems import *\nfrom .mobject.frame import *\nfrom .mobject.functions import *\nfrom .mobject.geometry import *\nfrom .mobject.graph import *\nfrom .mobject.logo import *\nfrom .mobject.matrix import *\nfrom .mobject.mobject import *\nfrom .mobject.mobject_update_utils import *\nfrom .mobject.number_line import *\nfrom .mobject.numbers import *\nfrom .mobject.polyhedra import *\nfrom .mobject.probability import *\nfrom .mobject.shape_matchers import *\nfrom .mobject.svg.brace import *\nfrom .mobject.svg.code_mobject import *\nfrom .mobject.svg.style_utils import *\nfrom .mobject.svg.svg_mobject import *\nfrom .mobject.svg.svg_path import *\nfrom .mobject.svg.tex_mobject import *\nfrom .mobject.svg.text_mobject import *\nfrom .mobject.table import *\nfrom .mobject.three_d_utils import *\nfrom .mobject.three_dimensions import *\nfrom .mobject.types.image_mobject import *\nfrom .mobject.types.point_cloud_mobject import *\nfrom .mobject.types.vectorized_mobject import *\nfrom .mobject.value_tracker import *\nfrom .mobject.vector_field import *\nfrom .renderer.cairo_renderer import *\nfrom .scene.moving_camera_scene import *\nfrom .scene.reconfigurable_scene import *\nfrom .scene.sample_space_scene import *\nfrom .scene.scene import *\nfrom .scene.scene_file_writer import *\nfrom .scene.three_d_scene import *\nfrom .scene.vector_space_scene import *\nfrom .scene.zoomed_scene import *\nfrom .utils import color as color\nfrom .utils import rate_functions, unit\nfrom .utils.bezier import *\nfrom .utils.color import *\nfrom .utils.config_ops import *\nfrom .utils.debug import *\nfrom .utils.file_ops import *\nfrom .utils.images import *\nfrom .utils.iterables import *\nfrom .utils.paths import *\nfrom .utils.rate_functions import *\nfrom .utils.simple_functions import *\nfrom .utils.sounds import *\nfrom .utils.space_ops import *\nfrom .utils.strings import 
*\nfrom .utils.tex import *\nfrom .utils.tex_templates import *\n\ntry:\n from IPython import get_ipython\n\n from .utils.ipython_magic import ManimMagic\nexcept ImportError:\n pass\nelse:\n ipy = get_ipython()\n if ipy is not None:\n ipy.register_magics(ManimMagic)\n\nfrom .plugins import *\n", "path": "manim/__init__.py" } ]
diff --git a/manim/__init__.py b/manim/__init__.py index 3b6cf0bd9d..516c80c93c 100644 --- a/manim/__init__.py +++ b/manim/__init__.py @@ -2,12 +2,9 @@ # flake8: noqa -try: - import importlib.metadata as importlib_metadata -except ModuleNotFoundError: - import importlib_metadata +import pkg_resources -__version__ = importlib_metadata.version(__name__) +__version__ = pkg_resources.get_distribution(__name__).version import sys
django-cms__django-cms-2189
Fixes doc issues listed in #2148: the index stops yielding a 404, and the link to the changes of 3.0 works.
[ { "content": "# -*- coding: utf-8 -*-\n#\n# django cms documentation build configuration file, created by\n# sphinx-quickstart on Tue Sep 15 10:47:03 2009.\n#\n# This file is execfile()d with the current directory set to its containing\n# dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out serve\n# to show the default.\n\nimport sys, os\n\n# If extensions (or modules to document with autodoc) are in another\n# directory, add these directories to sys.path here. If the directory is\n# relative to the documentation root, use os.path.abspath to make it absolute,\n# like shown here.\n\nsys.path.append(os.path.abspath('.'))\nsys.path.append(os.path.abspath('..'))\nsys.path.append(os.path.join(os.path.abspath('.'), '_ext'))\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\n#extensions = ['sphinx.ext.autodoc']\n\nextensions = ['djangocms', 'sphinx.ext.intersphinx']\nintersphinx_mapping = {\n 'python': ('http://docs.python.org/2.6', None),\n 'django': ('http://readthedocs.org/docs/django/en/latest/', None),\n 'classytags': ('http://readthedocs.org/docs/django-classy-tags/en/latest/', None),\n 'sekizai': ('http://readthedocs.org/docs/django-sekizai/en/latest/', None),\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'django cms'\ncopyright = u'2009, Patrick Lauber'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n\npath = os.path.split(os.path.dirname(__file__))[0]\npath = os.path.split(path)[0]\nsys.path.insert(0, path)\nimport cms\n\nversion = cms.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = cms.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation for\n# a list of supported languages.\nlanguage = \"en\"\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be\n# searched for source files.\nexclude_trees = ['build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = True\n\n# If true, the current module name will be prepended to all description unit\n# titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'djangocmsdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\nlatex_paper_size = 'a4'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'djangocms.tex', u'django cms Documentation',\n u'Patrick Lauber', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top\n# of the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n", "path": "docs/conf.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# django cms documentation build configuration file, created by\n# sphinx-quickstart on Tue Sep 15 10:47:03 2009.\n#\n# This file is execfile()d with the current directory set to its containing\n# dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out serve\n# to show the default.\n\nimport sys, os\n\n# If extensions (or modules to document with autodoc) are in another\n# directory, add these directories to sys.path here. If the directory is\n# relative to the documentation root, use os.path.abspath to make it absolute,\n# like shown here.\n\nsys.path.append(os.path.abspath('.'))\nsys.path.append(os.path.abspath('..'))\nsys.path.append(os.path.join(os.path.abspath('.'), '_ext'))\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\n#extensions = ['sphinx.ext.autodoc']\n\nextensions = ['djangocms', 'sphinx.ext.intersphinx']\nintersphinx_mapping = {\n 'python': ('http://docs.python.org/2.6', None),\n 'django': ('http://readthedocs.org/docs/django/en/latest/', None),\n 'classytags': ('http://readthedocs.org/docs/django-classy-tags/en/latest/', None),\n 'sekizai': ('http://readthedocs.org/docs/django-sekizai/en/latest/', None),\n}\n\n# Add any paths that contain templates here, relative to this directory.\n#templates_path = ['templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'django cms'\ncopyright = u'2009, Patrick Lauber'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n\npath = os.path.split(os.path.dirname(__file__))[0]\npath = os.path.split(path)[0]\nsys.path.insert(0, path)\nimport cms\n\nversion = cms.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = cms.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation for\n# a list of supported languages.\nlanguage = \"en\"\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be\n# searched for source files.\nexclude_trees = ['build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = True\n\n# If true, the current module name will be prepended to all description unit\n# titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'djangocmsdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\nlatex_paper_size = 'a4'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'djangocms.tex', u'django cms Documentation',\n u'Patrick Lauber', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top\n# of the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n", "path": "docs/conf.py" } ]
diff --git a/docs/conf.py b/docs/conf.py index 4d82d357ef6..81c9fa7abfa 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -38,7 +38,7 @@ } # Add any paths that contain templates here, relative to this directory. -templates_path = ['templates'] +#templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' diff --git a/docs/getting_started/tutorial.rst b/docs/getting_started/tutorial.rst index 0ba3990d7c4..8a952846388 100644 --- a/docs/getting_started/tutorial.rst +++ b/docs/getting_started/tutorial.rst @@ -89,13 +89,16 @@ Also add any (or all) of the following plugins, depending on your needs: * ``'cms.plugins.teaser'`` * ``'djangocms_text_ckeditor # note this needs to be above the 'cms' entry`` * ``'cms.plugins.video'`` -* ``'cms.plugins.twitter'`` .. warning:: Adding the ``'cms.plugins.snippet'`` plugin is a potential security hazard. For more information, refer to :ref:`snippets-plugin`. + In addition, ``'cms.plugins.text'`` and ``'cms.plugins.twitter'`` have + been removed from the Django-CMS bundle. Read :ref:`upgrade-to-3.0` for + detailed information. + The plugins are described in more detail in chapter :doc:`Plugins reference <plugin_reference>`. There are even more plugins available on the django CMS `extensions page`_. @@ -390,7 +393,7 @@ Up and running! =============== That should be it. Restart your development server using ``python manage.py runserver`` -and point a web browser to `127.0.0.1:8000 <http://127.0.0.1:8000>`_ :you should get +and point a web browser to `127.0.0.1:8000 <http://127.0.0.1:8000>`_ : you should get the django CMS "It Worked" screen. |it-works-cms| diff --git a/docs/upgrade/3.0.rst b/docs/upgrade/3.0.rst index 6a25bea518f..23024beccbb 100644 --- a/docs/upgrade/3.0.rst +++ b/docs/upgrade/3.0.rst @@ -1,3 +1,5 @@ +.. _upgrade-to-3.0: + ################# 3.0 release notes ################# @@ -6,9 +8,6 @@ What's new in 3.0 ***************** - -.. _upgrade-to-3.0: - .. warning:: Upgrading from previous versions 3.0 introduces some changes that **require** action if you are upgrading
pyodide__pyodide-3136
The content area in the docs is too narrow ## 📚 Documentation In the documentation strings, rendered code examples only fit 63 characters of width. It would be nice if we could make the content area a bit larger so that code examples fit at least 80 characters. On my screen, the content area is exactly the middle third of the screen, with the left and right thirds devoted to menus.
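The `conf.py` contents and diff that follow boil down to a single added setting. A sketch of the relevant fragment (the option name and theme come from the files below; the causal reading is an inference): sphinx-panels injects its bundled Bootstrap CSS by default, and its container rules appear to be what caps the article column, so disabling it leaves layout to the book theme.

```python
# docs/conf.py (fragment) -- mirrors the change shown in the diff below.
# Stop sphinx-panels from loading its own Bootstrap stylesheet so that the
# sphinx-book-theme layout (plus any overrides in css/pyodide.css) controls
# how wide the content column is.
panels_add_bootstrap_css = False

html_theme = "sphinx_book_theme"
html_static_path = ["_static"]
html_css_files = ["css/pyodide.css"]
```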
[ { "content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\npyodide_version = \"0.22.0.dev0\"\n\nif \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\nelse:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n )\n}\nversionwarning_body_selector = \"#main-content > div\"\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add modules to be mocked.\nmock_modules = [\"ruamel.yaml\", \"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except Exception:\n pass\n\n\n# Try not to cause side effects if we are imported incidentally.\n\ntry:\n import sphinx\n\n IN_SPHINX = hasattr(sphinx, \"application\")\nexcept ImportError:\n IN_SPHINX = False\n\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n\nif IN_READTHEDOCS:\n env = {\"PYODIDE_BASE_URL\": CDN_URL}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n # insert the Plausible analytics script to console.html\n console_path = Path(\"_build/html/console.html\")\n console_html = console_path.read_text().splitlines(keepends=True)\n for idx, line in enumerate(list(console_html)):\n if 'pyodide.js\">' in line:\n # insert the analytics script after the `pyodide.js` script\n console_html.insert(\n idx,\n '<script defer data-domain=\"pyodide.org\" src=\"https://plausible.io/js/plausible.js\"></script>\\n',\n )\n break\n else:\n raise ValueError(\"Could not find pyodide.js in the <head> section\")\n console_path.write_text(\"\".join(console_html))\n\n\nif IN_SPHINX:\n # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10\n import collections\n from typing import Callable, Mapping\n\n collections.Mapping = Mapping # type: ignore[attr-defined]\n collections.Callable = Callable # type: ignore[attr-defined]\n\n base_dir = Path(__file__).resolve().parent.parent\n path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n ]\n sys.path = path_dirs + sys.path\n\n import micropip # noqa: F401\n import pyodide\n\n # We hacked it so that autodoc will look for submodules, but only if we import\n # them here. 
TODO: look these up in the source directory?\n import pyodide.code\n import pyodide.console\n import pyodide.ffi.wrappers\n import pyodide.http\n import pyodide.webloop\n\n # The full version, including alpha/beta/rc tags.\n release = version = pyodide.__version__\n html_title = f\"Version {version}\"\n\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n atexit.register(remove_pyproxy_gen_ts)\n\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n elif not shutil.which(\"typedoc\"):\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # and it clutters api docs too much\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef globalReplace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nglobal_replacements = {\"{{PYODIDE_CDN_URL}}\": CDN_URL}\n\n\ndef setup(app):\n app.add_config_value(\"global_replacements\", {}, True)\n app.connect(\"source-read\", globalReplace)\n", "path": "docs/conf.py" } ]
[ { "content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\npanels_add_bootstrap_css = False\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\npyodide_version = \"0.22.0.dev0\"\n\nif \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\nelse:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n )\n}\nversionwarning_body_selector = \"#main-content > div\"\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add modules to be mocked.\nmock_modules = [\"ruamel.yaml\", \"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except Exception:\n pass\n\n\n# Try not to cause side effects if we are imported incidentally.\n\ntry:\n import sphinx\n\n IN_SPHINX = hasattr(sphinx, \"application\")\nexcept ImportError:\n IN_SPHINX = False\n\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n\nif IN_READTHEDOCS:\n env = {\"PYODIDE_BASE_URL\": CDN_URL}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n # insert the Plausible analytics script to console.html\n console_path = Path(\"_build/html/console.html\")\n console_html = console_path.read_text().splitlines(keepends=True)\n for idx, line in enumerate(list(console_html)):\n if 'pyodide.js\">' in line:\n # insert the analytics script after the `pyodide.js` script\n console_html.insert(\n idx,\n '<script defer data-domain=\"pyodide.org\" src=\"https://plausible.io/js/plausible.js\"></script>\\n',\n )\n break\n else:\n raise ValueError(\"Could not find pyodide.js in the <head> section\")\n console_path.write_text(\"\".join(console_html))\n\n\nif IN_SPHINX:\n # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10\n import collections\n from typing import Callable, Mapping\n\n collections.Mapping = Mapping # type: ignore[attr-defined]\n collections.Callable = Callable # type: ignore[attr-defined]\n\n base_dir = Path(__file__).resolve().parent.parent\n path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n ]\n sys.path = path_dirs + sys.path\n\n import micropip # noqa: F401\n import pyodide\n\n # We hacked it so that autodoc will look for submodules, but only if we import\n # them here. 
TODO: look these up in the source directory?\n import pyodide.code\n import pyodide.console\n import pyodide.ffi.wrappers\n import pyodide.http\n import pyodide.webloop\n\n # The full version, including alpha/beta/rc tags.\n release = version = pyodide.__version__\n html_title = f\"Version {version}\"\n\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n atexit.register(remove_pyproxy_gen_ts)\n\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n elif not shutil.which(\"typedoc\"):\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # and it clutters api docs too much\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef globalReplace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nglobal_replacements = {\"{{PYODIDE_CDN_URL}}\": CDN_URL}\n\n\ndef setup(app):\n app.add_config_value(\"global_replacements\", {}, True)\n app.connect(\"source-read\", globalReplace)\n", "path": "docs/conf.py" } ]
diff --git a/docs/conf.py b/docs/conf.py index ebb8f25a10c..37839824f3f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,6 +11,8 @@ from typing import Any from unittest import mock +panels_add_bootstrap_css = False + # -- Project information ----------------------------------------------------- project = "Pyodide"
pyca__cryptography-2522
Unpin pytest: revert https://github.com/pyca/cryptography/pull/2513; waiting on a pytest release with https://github.com/pytest-dev/pytest/issues/1238 landed.
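The change itself is only a requirement-specifier edit, applied to dev-requirements.txt, setup.py, and tox.ini (see the diff below). For context, a sketch of the setup.py fragment involved:

```python
# Before the revert: a PEP 440 exclusion pin kept the single problematic
# pytest release out of test environments.
test_requirements = [
    "pytest!=2.8.4",
]

# After the revert: any released pytest satisfies the requirement again.
test_requirements = [
    "pytest",
]
```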
[ { "content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.1.0\")\n setup_requirements.append(\"cffi>=1.1.0\")\n\n# If you add a new dep here you probably need to add it in the tox.ini as well\ntest_requirements = [\n \"pytest!=2.8.4\",\n \"pretend\",\n \"iso8601\",\n \"hypothesis\",\n \"pyasn1_modules\",\n]\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. 
The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise 
RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(\n where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\", \"tests\", \"tests.*\"]\n ),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.1.0\")\n setup_requirements.append(\"cffi>=1.1.0\")\n\n# If you add a new dep here you probably need to add it in the tox.ini as well\ntest_requirements = [\n \"pytest\",\n \"pretend\",\n \"iso8601\",\n \"hypothesis\",\n \"pyasn1_modules\",\n]\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. 
The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise 
RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(\n where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\", \"tests\", \"tests.*\"]\n ),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py" } ]
diff --git a/dev-requirements.txt b/dev-requirements.txt index 9aca5dd4411d..c409ff9217e2 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -7,7 +7,7 @@ iso8601 pep8-naming pretend pyasn1_modules -pytest!=2.8.4 +pytest requests sphinx==1.3.1 sphinx_rtd_theme diff --git a/setup.py b/setup.py index 43fe17e7c416..19f1e66382a2 100644 --- a/setup.py +++ b/setup.py @@ -59,7 +59,7 @@ # If you add a new dep here you probably need to add it in the tox.ini as well test_requirements = [ - "pytest!=2.8.4", + "pytest", "pretend", "iso8601", "hypothesis", diff --git a/tox.ini b/tox.ini index cf899b741dd3..63f1e4bd0d91 100644 --- a/tox.ini +++ b/tox.ini @@ -7,7 +7,7 @@ deps = coverage iso8601 pretend - pytest!=2.8.4 + pytest hypothesis>=1.11.4 pyasn1_modules ./vectors
mathesar-foundation__mathesar-841
Use correct test client parameters when sending a JSON body payload

## Problem
Currently, when sending a request containing a JSON payload with the Django REST framework test client, the payload is converted into a string using `json.dumps`, even though Django REST framework provides convenience parameters that do this automatically.

## Proposed solution
Use the `format` parameter of the DRF test client, or set the default payload format in the DRF settings, so that the test client can handle the conversion automatically.
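For illustration, a minimal sketch of the two approaches proposed above. The endpoint, table id, payload, and test name are placeholders rather than code taken verbatim from the Mathesar test suite; the point is the `format="json"` argument and the `TEST_REQUEST_DEFAULT_FORMAT` setting.

```python
# Minimal sketch (placeholder endpoint/payload): letting DRF's test client
# serialize the JSON body instead of calling json.dumps() manually.
from rest_framework.test import APIClient


def test_column_create_sketch():
    client = APIClient()
    # Option 1: per request -- ask the test client to encode the body as JSON,
    # instead of passing json.dumps(data) plus content_type='application/json'.
    response = client.post(
        "/api/v0/tables/1/columns/",
        data={"name": "anewcolumn", "type": "NUMERIC"},
        format="json",
    )
    assert response.status_code in (200, 201)


# Option 2: project-wide -- in settings.py, make JSON the default test request
# format so the format argument can be dropped from individual calls.
REST_FRAMEWORK = {
    "TEST_REQUEST_DEFAULT_FORMAT": "json",
}
```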
[ { "content": "\"\"\"\nDjango settings for config project.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n\n# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n# variables on commas when casting to Csv()\ndef pipe_delim(pipe_string):\n # Remove opening and closing brackets\n pipe_string = pipe_string[1:-1]\n # Split on pipe delim\n return pipe_string.split(\"|\")\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.frontend_settings\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\n\n# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n# See pipe_delim above for why we use pipes as delimiters\nDATABASES = {\n db_key: db_url(url_string)\n for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))\n}\nDATABASES[decouple_config('DJANGO_DATABASE_KEY')] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n\nfor db_key, db_dict in DATABASES.items():\n # Engine can be '.postgresql' or '.postgresql_psycopg2'\n if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):\n raise ValueError(\n f\"{db_key} is not a PostgreSQL database. 
\"\n f\"{db_dict['ENGINE']} found for {db_key}'s engine.\"\n )\n\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nif decouple_config('TEST', default=False, cast=bool):\n for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n# https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/\n\nSTATIC_URL = \"/static/\"\n\n# When running with DEBUG=False, the webserver needs to serve files from this location\n# python manage.py collectstatic has to be run to collect all static files into this location\n# The files need to served in brotli or gzip compressed format\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n\n# Update Authentication classes, removed BasicAuthentication\n# Defaults: https://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.SessionAuthentication'\n ],\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n}\n\n# Mathesar settings\nMATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')\nMATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')\nMATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')\nMATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'\n\n\nSTATICFILES_DIRS = [MATHESAR_UI_BUILD_LOCATION]\n", "path": "config/settings.py" } ]
[ { "content": "\"\"\"\nDjango settings for config project.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n\n# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n# variables on commas when casting to Csv()\ndef pipe_delim(pipe_string):\n # Remove opening and closing brackets\n pipe_string = pipe_string[1:-1]\n # Split on pipe delim\n return pipe_string.split(\"|\")\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.frontend_settings\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\n\n# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n# See pipe_delim above for why we use pipes as delimiters\nDATABASES = {\n db_key: db_url(url_string)\n for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))\n}\nDATABASES[decouple_config('DJANGO_DATABASE_KEY')] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n\nfor db_key, db_dict in DATABASES.items():\n # Engine can be '.postgresql' or '.postgresql_psycopg2'\n if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):\n raise ValueError(\n f\"{db_key} is not a PostgreSQL database. 
\"\n f\"{db_dict['ENGINE']} found for {db_key}'s engine.\"\n )\n\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nif decouple_config('TEST', default=False, cast=bool):\n for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n# https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/\n\nSTATIC_URL = \"/static/\"\n\n# When running with DEBUG=False, the webserver needs to serve files from this location\n# python manage.py collectstatic has to be run to collect all static files into this location\n# The files need to served in brotli or gzip compressed format\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n\n# Update Authentication classes, removed BasicAuthentication\n# Defaults: https://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.SessionAuthentication'\n ],\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n}\n\n# Mathesar settings\nMATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')\nMATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')\nMATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')\nMATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'\n\n\nSTATICFILES_DIRS = [MATHESAR_UI_BUILD_LOCATION]\n", "path": "config/settings.py" } ]
diff --git a/config/settings.py b/config/settings.py index 931c6d9819..893f81add3 100644 --- a/config/settings.py +++ b/config/settings.py @@ -175,6 +175,7 @@ def pipe_delim(pipe_string): 'django_filters.rest_framework.DjangoFilterBackend', 'rest_framework.filters.OrderingFilter', ), + 'TEST_REQUEST_DEFAULT_FORMAT': 'json', } # Mathesar settings diff --git a/mathesar/tests/api/test_column_api.py b/mathesar/tests/api/test_column_api.py index c49fe29ec8..0ff730e642 100644 --- a/mathesar/tests/api/test_column_api.py +++ b/mathesar/tests/api/test_column_api.py @@ -1,4 +1,3 @@ -import json from datetime import date, timedelta import pytest @@ -120,8 +119,7 @@ def test_column_create(column_test_table, client): } response = client.post( f"/api/v0/tables/{column_test_table.id}/columns/", - data=json.dumps(data), - content_type='application/json' + data=data, ) assert response.status_code == 201 new_columns_response = client.get( @@ -152,7 +150,7 @@ def test_column_create_default( cache.clear() name = "anewcolumn" data = {"name": name, "type": type_, "default": default} - response = client.post(f"/api/v0/tables/{column_test_table.id}/columns/", data) + response = client.post(f"/api/v0/tables/{column_test_table.id}/columns/", data, format='multipart') assert response.status_code == 201 # Ensure the correct serialized date is returned by the API @@ -196,8 +194,7 @@ def test_column_create_retrieve_options(column_test_table, client, type_, type_o } response = client.post( f"/api/v0/tables/{column_test_table.id}/columns/", - data=json.dumps(data), - content_type='application/json' + data=data, ) assert response.status_code == 201 new_columns_response = client.get( @@ -228,8 +225,7 @@ def test_column_create_bad_options(column_test_table, client, type_options): } response = client.post( f"/api/v0/tables/{column_test_table.id}/columns/", - data=json.dumps(data), - content_type='application/json' + data=data, ) assert response.status_code == 400 @@ -293,7 +289,6 @@ def test_column_update_display_options(column_test_table, client): response = client.patch( f"/api/v0/tables/{column_test_table.id}/columns/{column_id}/", display_options_data, - format='json' ) assert response.json()["display_options"] == display_options @@ -318,7 +313,7 @@ def test_column_update_default(column_test_table, client): def test_column_update_delete_default(column_test_table, client): cache.clear() expt_default = None - data = json.dumps({"default": None}) + data = {"default": None} response = client.get( f"/api/v0/tables/{column_test_table.id}/columns/" ) @@ -326,8 +321,8 @@ def test_column_update_delete_default(column_test_table, client): column_index = 2 column_id = columns[column_index]['id'] response = client.patch( - f"/api/v0/tables/{column_test_table.id}/columns/{column_id}/", data=data, - content_type="application/json" + f"/api/v0/tables/{column_test_table.id}/columns/{column_id}/", + data=data, ) assert response.json()["default"] == expt_default @@ -450,7 +445,6 @@ def test_column_update_type_options(column_test_table, client): response = client.patch( f"/api/v0/tables/{column_test_table.id}/columns/{column_id}/", data, - format='json' ) assert response.json()["type"] == type_ assert response.json()["type_options"] == type_options @@ -469,14 +463,12 @@ def test_column_update_type_options_no_type(column_test_table, client): client.patch( f"/api/v0/tables/{column_test_table.id}/columns/{column_id}/", data, - format='json' ) type_options = {"precision": 3, "scale": 1} type_option_data = {"type_options": type_options} response 
= client.patch( f"/api/v0/tables/{column_test_table.id}/columns/{column_id}/", type_option_data, - format='json' ) assert response.json()["type"] == type_ assert response.json()["type_options"] == type_options @@ -529,8 +521,7 @@ def test_column_update_type_invalid_options(column_test_table, client, type_opti column_id = columns[column_index]['id'] response = client.patch( f"/api/v0/tables/{column_test_table.id}/columns/{column_id}/", - data=json.dumps(data), - content_type='application/json' + data=data, ) assert response.status_code == 400 diff --git a/mathesar/tests/api/test_data_file_api.py b/mathesar/tests/api/test_data_file_api.py index 2ecfcc48a0..ba70a4996e 100644 --- a/mathesar/tests/api/test_data_file_api.py +++ b/mathesar/tests/api/test_data_file_api.py @@ -127,7 +127,7 @@ def test_data_file_create_csv(client, csv_filename, header): with open(csv_filename, 'rb') as csv_file: data = {'file': csv_file, 'header': header} - response = client.post('/api/v0/data_files/', data) + response = client.post('/api/v0/data_files/', data, format='multipart') with open(csv_filename, 'r') as csv_file: correct_dialect = csv.get_sv_dialect(csv_file) check_create_data_file_response( @@ -140,7 +140,7 @@ def test_data_file_create_csv_long_name(client, csv_filename): with open(csv_filename, 'rb') as csv_file: with patch.object(os.path, 'basename', lambda _: '0' * 101): data = {'file': csv_file} - response = client.post('/api/v0/data_files/', data) + response = client.post('/api/v0/data_files/', data, format='multipart') data_file_dict = response.json() assert response.status_code == 400 assert 'Ensure this filename has at most 100' in data_file_dict['file'][0] @@ -203,7 +203,7 @@ def test_data_file_create_invalid_file(client): with patch.object(csv, "get_sv_dialect") as mock_infer: mock_infer.side_effect = InvalidTableError with open(file, 'r') as f: - response = client.post('/api/v0/data_files/', data={'file': f}) + response = client.post('/api/v0/data_files/', data={'file': f}, format='multipart') response_dict = response.json() assert response.status_code == 400 assert response_dict[0] == 'Unable to tabulate data' @@ -211,7 +211,7 @@ def test_data_file_create_invalid_file(client): def test_data_file_create_non_unicode_file(client, non_unicode_csv_filename): with open(non_unicode_csv_filename, 'rb') as non_unicode_file: - response = client.post('/api/v0/data_files/', data={'file': non_unicode_file}) + response = client.post('/api/v0/data_files/', data={'file': non_unicode_file}, format='multipart') assert response.status_code == 201 @@ -257,7 +257,7 @@ def test_data_file_create_multiple_source_fields(client, csv_filename, paste_fil paste_text = paste_file.read() with open(csv_filename, 'rb') as csv_file: data = {'file': csv_file, 'paste': paste_text} - response = client.post('/api/v0/data_files/', data) + response = client.post('/api/v0/data_files/', data, format='multipart') response_dict = response.json() assert response.status_code == 400 assert 'Multiple source fields passed:' in response_dict['non_field_errors'][0] diff --git a/mathesar/tests/api/test_table_api.py b/mathesar/tests/api/test_table_api.py index 4e7c6443bf..2d9f4d0a74 100644 --- a/mathesar/tests/api/test_table_api.py +++ b/mathesar/tests/api/test_table_api.py @@ -418,7 +418,7 @@ def test_table_previews(client, schema, engine_email_type): {"name": "col_6", "type": "NUMERIC"} ] } - response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body, format='json') + response = 
client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body) assert response.status_code == 200 expect_dict = { 'name': 'Type Modification Table', @@ -459,7 +459,7 @@ def test_table_previews_wrong_column_number(client, schema, engine_email_type): {"name": "col_6", "type": "NUMERIC"} ] } - response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body, format='json') + response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body) assert response.status_code == 400 assert "number" in response.json()[0] @@ -489,7 +489,7 @@ def test_table_previews_invalid_type_cast(client, schema, engine_email_type): {"name": "col_6", "type": "NUMERIC"} ] } - response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body, format='json') + response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body) assert response.status_code == 400 assert "Invalid type" in response.json()[0] @@ -519,7 +519,7 @@ def test_table_previews_invalid_type_cast_check(client, schema, engine_email_typ {"name": "col_6", "type": "NUMERIC"} ] } - response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body, format='json') + response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body) assert response.status_code == 400 assert "Invalid type" in response.json()[0] @@ -549,7 +549,7 @@ def test_table_previews_unsupported_type(client, schema, engine_email_type): {"name": "col_6", "type": "NUMERIC"} ] } - response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body, format='json') + response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body) assert response.status_code == 400 assert "not supported" in response.json()[0] @@ -569,7 +569,7 @@ def test_table_previews_missing_columns(client, schema, engine_email_type): table = Table.objects.get(id=response_table['id']) post_body = {} - response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body, format='json') + response = client.post(f'/api/v0/tables/{table.id}/previews/', data=post_body) assert response.status_code == 400 assert "columns" in response.json() @@ -1011,7 +1011,7 @@ def test_table_patch_same_table_name(create_table, client): body = {'name': table_name} # Need to specify format here because otherwise the body gets sent # as a multi-part form, which can't handle nested keys. - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) assert response.status_code == 200 assert response.json()['name'] == table_name @@ -1027,7 +1027,7 @@ def test_table_patch_columns_and_table_name(create_table, client): } # Need to specify format here because otherwise the body gets sent # as a multi-part form, which can't handle nested keys. 
- response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_error = response.json() assert response.status_code == 400 @@ -1042,7 +1042,7 @@ def test_table_patch_columns_no_changes(create_table, client, engine_email_type) body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1058,7 +1058,7 @@ def test_table_patch_columns_one_name_change(create_table, client, engine_email_ body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1075,7 +1075,7 @@ def test_table_patch_columns_two_name_changes(create_table, client, engine_email body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1091,7 +1091,7 @@ def test_table_patch_columns_one_type_change(create_table, client, engine_email_ body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1128,7 +1128,7 @@ def test_table_patch_columns_multiple_type_change(create_data_types_table, clien body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1150,7 +1150,7 @@ def test_table_patch_columns_one_drop(create_data_types_table, client, engine_em body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1168,7 +1168,7 @@ def test_table_patch_columns_multiple_drop(create_data_types_table, client, engi body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1185,7 +1185,7 @@ def test_table_patch_columns_diff_name_type_change(create_data_types_table, clie body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1202,7 +1202,7 @@ def test_table_patch_columns_same_name_type_change(create_data_types_table, clie body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1221,7 +1221,7 @@ def test_table_patch_columns_multiple_name_type_change(create_data_types_table, body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = 
client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1239,7 +1239,7 @@ def test_table_patch_columns_diff_name_type_drop(create_data_types_table, client body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1258,7 +1258,7 @@ def test_table_patch_columns_same_name_type_drop(create_data_types_table, client body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 200 @@ -1274,7 +1274,7 @@ def test_table_patch_columns_invalid_type(create_data_types_table, client, engin body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 400 @@ -1291,7 +1291,7 @@ def test_table_patch_columns_invalid_type_with_name(create_data_types_table, cli body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 400 assert 'Pizza is not a boolean' in response_json[0] @@ -1312,7 +1312,7 @@ def test_table_patch_columns_invalid_type_with_type(create_data_types_table, cli body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 400 assert 'Pizza is not a boolean' in response_json[0] @@ -1333,7 +1333,7 @@ def test_table_patch_columns_invalid_type_with_drop(create_data_types_table, cli body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 400 assert 'Pizza is not a boolean' in response_json[0] @@ -1356,7 +1356,7 @@ def test_table_patch_columns_invalid_type_with_multiple_changes(create_data_type body = { 'columns': column_data } - response = client.patch(f'/api/v0/tables/{table.id}/', body, format='json') + response = client.patch(f'/api/v0/tables/{table.id}/', body) response_json = response.json() assert response.status_code == 400 assert 'Pizza is not a boolean' in response_json[0]
ioos__compliance-checker-252
Misplaced High/Highly Recommended attribute checks for ACDD check in plaintext output

When invoking the compliance checker against one of the test data files with the ACDD check, some of the highly recommended attributes are misplaced.

`python cchecker.py -t acdd compliance_checker/tests/data/2dim-grid.nc > cc_output.txt`

[cc_output.txt](https://github.com/ioos/compliance-checker/files/224776/cc_output.txt)

Notice that only `keywords` is listed under the "High Priority" header, even though `title` and `summary` are high priority as well; they end up under the wrong header. The variables are in the [correct list of variables to check in the current master as of this writing](https://github.com/ioos/compliance-checker/blob/a95fc7f0f9eff8f9b6980788f540beb8f622857b/compliance_checker/acdd.py#L24). I also checked the commits of some current production compliance-checker code and this issue appears to have existed back to 532426dc8117d91c0e87badb9b134535463a41c9, and probably beyond.

```
 High Priority
--------------------------------------------------------------------------------
    Name                                :Priority: Score
keywords                                :3:     0/1

 Medium Priority
--------------------------------------------------------------------------------
    Name                                :Priority: Score
keywords_vocabulary                     :2:     0/1
lat_extents                             :2:     0/0
license                                 :2:     0/1
lon_extents                             :2:     0/0
naming_authority                        :2:     0/1
processing_level                        :2:     0/1
project                                 :2:     0/1
standard_name_vocabulary                :2:     0/1
summary                                 :3:     0/1
time_coverage_duration                  :2:     0/1
time_coverage_end                       :2:     0/1
time_coverage_resolution                :2:     0/1
time_coverage_start                     :2:     0/1
time_extents                            :2:     0/0
title                                   :3:     0/1
varattr                                 :3:    13/21
vertical_extents                        :2:     0/0
```

Side note, we ought to add some integration testing for the output of the checker.
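A simplified sketch of the regrouping the plaintext output needs. The entries below are a cut-down stand-in for the checker's real `score_list` items (which also carry child results); the point is that sorting on the priority weight before printing keeps every priority-3 attribute inside the "High Priority" block, which mirrors the eventual fix of sorting `score_list` by weight in descending order.

```python
# Hypothetical, simplified score_list entries: (name, priority weight, (scored, possible)).
score_list = [
    ["keywords", 3, (0, 1)],
    ["license", 2, (0, 1)],
    ["summary", 3, (0, 1)],
    ["title", 3, (0, 1)],
]

# High (3) first, then medium (2), then low (1), so each header's block is contiguous.
score_list.sort(key=lambda entry: entry[1], reverse=True)

for name, weight, (scored, possible) in score_list:
    print('%-40s:%s:%6s/%s' % (name, weight, scored, possible))
```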
[ { "content": "\"\"\"\nCompliance Checker suite runner\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nimport inspect\nimport itertools\nimport json\nfrom netCDF4 import Dataset\nfrom lxml import etree as ET\nfrom compliance_checker.base import fix_return_value, Result\nfrom owslib.sos import SensorObservationService\nfrom owslib.swe.sensor.sml import SensorML\ntry:\n from urlparse import urlparse\nexcept:\n from urllib.parse import urlparse\nfrom datetime import datetime\nimport requests\nimport textwrap\n\n\nclass CheckSuite(object):\n\n checkers = {} # Base dict of checker names to BaseCheck derived types, override this in your CheckSuite implementation\n\n @classmethod\n def load_all_available_checkers(cls):\n \"\"\"\n Helper method to retrieve all sub checker classes derived from various\n base classes.\n \"\"\"\n from pkg_resources import working_set\n for x in working_set.iter_entry_points('compliance_checker.suites'):\n try:\n xl = x.load()\n\n cls.checkers[xl.name] = xl\n except Exception as e:\n print(\"Could not load\", x, \":\", e, file=sys.stderr)\n\n def _get_checks(self, checkclass):\n \"\"\"\n Helper method to retreive check methods from a Checker class.\n\n The name of the methods in the Checker class should start with \"check_\" for this\n method to find them.\n \"\"\"\n meths = inspect.getmembers(checkclass, inspect.ismethod)\n return [x[1] for x in meths if x[0].startswith(\"check_\")]\n\n def _run_check(self, check_method, ds):\n val = check_method(ds)\n\n if isinstance(val, list):\n return [fix_return_value(v, check_method.__func__.__name__, check_method, check_method.__self__) for v in val]\n\n return [fix_return_value(val, check_method.__func__.__name__, check_method, check_method.__self__)]\n\n def _get_valid_checkers(self, ds, checker_names):\n \"\"\"\n Returns a filtered list of 2-tuples: (name, valid checker) based on the ds object's type and\n the user selected names.\n \"\"\"\n if len(checker_names) == 0:\n checker_names = list(self.checkers.keys())\n\n args = [(name, self.checkers[name]) for name in checker_names if name in self.checkers]\n valid = []\n\n all_checked = set([a[1] for a in args]) # only class types\n checker_queue = set(args)\n\n while len(checker_queue):\n name, a = checker_queue.pop()\n if type(ds) in a().supported_ds:\n valid.append((name, a))\n\n # add all to queue\n for subc in a.__subclasses__():\n if subc not in all_checked:\n all_checked.add(subc)\n checker_queue.add((name, subc))\n\n return valid\n\n def run(self, ds, *checker_names):\n \"\"\"\n Runs this CheckSuite on the dataset with all the passed Checker instances.\n\n Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.\n \"\"\"\n\n ret_val = {}\n checkers = self._get_valid_checkers(ds, checker_names)\n\n if len(checkers) == 0:\n print(\"No valid checkers found for tests '%s'\" % \",\".join(checker_names))\n\n for checker_name, checker_class in checkers:\n\n checker = checker_class()\n checker.setup(ds)\n\n checks = self._get_checks(checker)\n vals = []\n errs = {} # check method name -> (exc, traceback)\n\n for c in checks:\n try:\n vals.extend(self._run_check(c, ds))\n except Exception as e:\n errs[c.__func__.__name__] = (e, sys.exc_info()[2])\n\n # score the results we got back\n groups = self.scores(vals)\n\n ret_val[checker_name] = groups, errs\n\n return ret_val\n\n @classmethod\n def passtree(cls, groups, limit):\n for r in groups:\n if 
r.children:\n x = cls.passtree(r.children, limit)\n if r.weight >= limit and x is False:\n return False\n\n if r.weight >= limit and r.value[0] != r.value[1]:\n return False\n\n return True\n\n def build_structure(self, check_name, groups, source_name, limit=1):\n '''\n Compiles the checks, results and scores into an aggregate structure which looks like:\n\n {\n \"scored_points\": 396,\n \"low_count\": 0,\n \"possible_points\": 400,\n \"testname\": \"gliderdac\",\n \"medium_count\": 2,\n \"source_name\": \".//rutgers/ru01-20140120T1444/ru01-20140120T1649.nc\",\n \"high_count\": 0,\n \"all_priorities\" : [...],\n \"high_priorities\": [...],\n \"medium_priorities\" : [...],\n \"low_priorities\" : [...]\n }\n\n @param check_name The test which was run\n @param groups List of results from compliance checker\n @param source_name Source of the dataset, used for title\n '''\n aggregates = {}\n\n aggregates['scored_points'] = 0\n aggregates['possible_points'] = 0\n high_priorities = []\n medium_priorities = []\n low_priorities = []\n all_priorities = []\n\n aggregates['high_count'] = 0\n aggregates['medium_count'] = 0\n aggregates['low_count'] = 0\n\n def named_function(result):\n for child in result.children:\n all_priorities.append(child)\n named_function(child)\n\n # For each result, bin them into the appropriate category, put them all\n # into the all_priorities category and add up the point values\n for res in groups:\n if res.weight < limit:\n continue\n # If the result has 0 possible points, then it was not valid for\n # this dataset and contains no meaningful information\n if res.value[1] == 0:\n continue\n aggregates['scored_points'] += res.value[0]\n aggregates['possible_points'] += res.value[1]\n if res.weight == 3:\n high_priorities.append(res)\n if res.value[0] < res.value[1]:\n aggregates['high_count'] += 1\n elif res.weight == 2:\n medium_priorities.append(res)\n if res.value[0] < res.value[1]:\n aggregates['medium_count'] += 1\n else:\n low_priorities.append(res)\n if res.value[0] < res.value[1]:\n aggregates['low_count'] += 1\n all_priorities.append(res)\n # Some results have children\n # We don't render children inline with the top three tables, but we\n # do total the points and display the messages\n named_function(res)\n\n aggregates['high_priorities'] = high_priorities\n aggregates['medium_priorities'] = medium_priorities\n aggregates['low_priorities'] = low_priorities\n aggregates['all_priorities'] = all_priorities\n aggregates['testname'] = check_name\n aggregates['source_name'] = source_name\n return aggregates\n\n def json_output(self, check_name, groups, file_object, source_name, limit):\n '''\n Builds the results into a JSON structure and writes it to the file buffer.\n\n @param check_name The test which was run\n @param groups List of results from compliance checker\n @param output_filename Path to file to save output\n @param file_object A python file object where the output should be written to\n @param source_name Source of the dataset, used for title\n @param limit Integer value for limiting output\n '''\n aggregates = self.build_structure(check_name, groups, source_name, limit)\n aggregates = self.serialize(aggregates)\n json_string = json.dumps(aggregates, ensure_ascii=False)\n file_object.write(str(json_string))\n return\n\n def serialize(self, o):\n '''\n Returns a safe serializable object that can be serialized into JSON.\n\n @param o Python object to serialize\n '''\n if isinstance(o, (list, tuple)):\n return [self.serialize(i) for i in o]\n if isinstance(o, 
dict):\n return {k: self.serialize(v) for k, v in o.items()}\n if isinstance(o, datetime):\n return o.isoformat()\n if isinstance(o, Result):\n return self.serialize(o.serialize())\n return o\n\n def html_output(self, check_name, groups, file_object, source_name, limit):\n '''\n Renders an HTML file using Jinja2 and saves the output to the file specified.\n\n @param check_name The test which was run\n @param groups List of results from compliance checker\n @param output_filename Path to file to save output\n @param file_object A python file object where the output should be written to\n @param source_name Source of the dataset, used for title\n @param limit Integer value for limiting output\n '''\n from jinja2 import Environment, PackageLoader\n self.j2 = Environment(loader=PackageLoader('compliance_checker', 'data/templates'))\n template = self.j2.get_template('ccheck.html.j2')\n\n template_vars = self.build_structure(check_name, groups, source_name, limit)\n\n buf = template.render(**template_vars)\n\n file_object.write(buf)\n\n def get_points(self, groups, limit):\n score_list = []\n score_only_list = []\n\n for v in range(len(groups)):\n score_list.append([groups[v].name, groups[v].weight, groups[v].value,\n groups[v].children])\n if groups[v].weight >= limit:\n score_only_list.append(groups[v].value)\n\n points = [x[0] for x in score_only_list]\n out_of = [x[1] for x in score_only_list]\n\n points = sum(points)\n out_of = sum(out_of)\n\n return score_list, points, out_of\n\n def standard_output(self, limit, check_name, groups):\n \"\"\"\n Generates the Terminal Output for Standard cases\n\n Returns the dataset needed for the verbose output, as well as the failure flags.\n \"\"\"\n score_list, points, out_of = self.get_points(groups, limit)\n print('\\n')\n print(\"-\" * 80)\n print('{:^80}'.format(\"The dataset scored %r out of %r points\" % (points, out_of)))\n print('{:^80}'.format(\"during the %s check\" % check_name))\n print(\"-\" * 80)\n\n return [score_list, points, out_of]\n\n def non_verbose_output_generation(self, score_list, groups, limit, points, out_of):\n\n if points < out_of:\n print('{:^80}'.format(\"Scoring Breakdown:\"))\n print('\\n')\n priority_flag = 3\n for x in range(len(score_list)):\n if score_list[x][1] == 3 and limit <= 3 :\n if priority_flag == 3:\n print('{:^80}'.format(\"High Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n priority_flag -= 1\n print('%-40s:%s:%6s/%1s' % (score_list[x][0][0:39], score_list[x][1], score_list[x][2][0], score_list[x][2][1]))\n\n elif score_list[x][1] == 2 and limit <= 2 :\n if priority_flag == 2:\n print('\\n')\n print('{:^80}'.format(\"Medium Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n priority_flag -= 1\n print('%-40s:%s:%6s/%1s' % (score_list[x][0][0:39], score_list[x][1], score_list[x][2][0], score_list[x][2][1]))\n\n elif score_list[x][1] == 1 and limit == 1 :\n if priority_flag == 1:\n print('\\n')\n print('{:^80}'.format(\"Low Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n priority_flag -= 1\n print('%-40s:%s:%6s/%1s' % (score_list[x][0][0:39], score_list[x][1], score_list[x][2][0], score_list[x][2][1]))\n\n elif score_list[x][1] == 1 and limit == 1 and priority_flag == 2:\n print('{:^80}'.format('No medium priority tests present'))\n print('-' * 80)\n priority_flag -= 1\n # Catch All for pretty presentation\n if priority_flag == 2 and limit == 2:\n print('{:^80}'.format('No Medium 
priority tests present'))\n print('-' * 80)\n\n if priority_flag == 2 and limit == 1:\n print('{:^80}'.format('No Medium priority tests present'))\n print('-' * 80)\n print('')\n print('{:^80}'.format('No Low priority tests present'))\n print('-' * 80)\n\n if priority_flag == 1 and limit == 1:\n print('{:^80}'.format('No Low priority tests present'))\n print('-' * 80)\n\n print(\"\\n\" + \"\\n\" + '-' * 80)\n print('{:^80}'.format('Reasoning for the failed tests given below:'))\n print('\\n')\n print('%s%37s:%10s:%8s' % ('Name', 'Priority', ' Score', 'Reasoning'))\n print(\"-\" * 80)\n self.reasoning_routine(groups, 0)\n\n else:\n print(\"All tests passed!\")\n\n def verbose_output_generation(self, groups, limit, points, out_of):\n '''\n Generates the Terminal Output for Verbose cases\n '''\n priority_flag = 3\n print('{:^80}'.format(\"Verbose Scoring Breakdown:\"), end=' ')\n self.print_routine(groups, 0, priority_flag)\n if points < out_of:\n print(\"\\n\" + \"\\n\" + '-' * 80)\n print('{:^80}'.format('Reasoning for the failed tests given below:'))\n print('\\n')\n print('%s%37s:%10s:%8s' % ('Name', 'Priority', ' Score', 'Reasoning'))\n print(\"-\" * 80)\n self.reasoning_routine(groups, 0)\n\n pass\n\n def print_routine(self, list_of_results, indent, priority_flag):\n \"\"\"\n print routine performed\n \"\"\"\n def weight_func(r):\n \"\"\"\n Function that returns the weight, used for sorting by priority\n \"\"\"\n return r.weight\n\n # Sorting method used to properly sort the output by priority.\n grouped_sorted = []\n grouped_sorted = sorted(list_of_results, key=weight_func, reverse=True)\n\n # Loop over input\n for res in grouped_sorted:\n # If statements to print the proper Headings\n if res.weight == 3 and indent == 0 and priority_flag == 3:\n print('\\n')\n print('{:^80}'.format(\"High Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n\n priority_flag -= 1\n if res.weight == 2 and indent == 0 and priority_flag == 2:\n print('\\n')\n print('{:^80}'.format(\"Medium Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n\n priority_flag -= 1\n if res.weight == 1 and indent == 0 and priority_flag == 1:\n print('\\n')\n print('{:^80}'.format(\"Low Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n priority_flag -= 1\n\n print('%-40s:%s:%s%6s/%1s' % ((indent * ' ' + res.name)[0:39], res.weight, indent * ' ', res.value[0], res.value[1]))\n if res.children:\n self.print_routine(res.children, indent + 1, priority_flag)\n\n def reasoning_routine(self, list_of_results, indent, line = True):\n \"\"\"\n print routine performed\n \"\"\"\n def weight_func(r):\n \"\"\"\n Function that returns the weight, used for sorting by priority\n \"\"\"\n return r.weight\n\n # Sorting method used to properly sort the output by priority.\n grouped_sorted = []\n grouped_sorted = sorted(list_of_results, key=weight_func, reverse=True)\n\n wrapper = textwrap.TextWrapper(initial_indent = '', width = 80, subsequent_indent = ' ' * 54)\n for res in grouped_sorted:\n if (res.value[0] != res.value[1]) and not res.msgs:\n print('%-39s:%1s:%6s/%2s : %s' % ((indent * ' ' + res.name)[0:39], res.weight, res.value[0], res.value[1], ' '))\n\n if (res.value[0] != res.value[1]) and res.msgs:\n print(wrapper.fill('%-39s:%1s:%6s/%2s : %s' % ((indent * ' ' + res.name)[0:39], res.weight, res.value[0], res.value[1], \", \".join(res.msgs))))\n\n if res.children:\n self.reasoning_routine(res.children, indent + 1, 
False)\n\n def load_dataset(self, ds_str):\n \"\"\"\n Helper method to load a dataset or SOS GC/DS url.\n \"\"\"\n ds = None\n\n # try to figure out if this is a local NetCDF Dataset, a remote one, or an SOS GC/DS url\n doc = None\n pr = urlparse(ds_str)\n if pr.netloc: # looks like a remote url\n rhead = requests.head(ds_str)\n\n # if we get a 400 here, it's likely a Dataset openable OpenDAP url\n if rhead.status_code == 400:\n pass\n elif rhead.status_code == 200 and rhead.headers['content-type'] == 'text/xml':\n # probably interesting, grab it\n r = requests.get(ds_str)\n r.raise_for_status()\n\n doc = r.text\n else:\n raise Exception(\"Could not understand response code %s and content-type %s\" % (rhead.status_code, rhead.headers.get('content-type', 'none')))\n else:\n def is_binary_string(bts):\n # do a cheap imitation of libmagic\n # http://stackoverflow.com/a/7392391/84732\n if sys.version_info >= (3, ):\n join_str = ''\n textchars = join_str.join(map(chr, [7, 8, 9, 10, 12, 13, 27] + list(range(0x20, 0x100)))).encode()\n #textchars = textchars.encode()\n else:\n # because of `unicode_literals` import, we need to convert\n # to a Py2 string/bytes\n join_str = str('')\n textchars = join_str.join(map(chr, [7, 8, 9, 10, 12, 13, 27] + list(range(0x20, 0x100))))\n return bool(bts.translate(None, textchars))\n\n with open(ds_str, 'rb') as f:\n first_chunk = f.read(1024)\n if is_binary_string(first_chunk):\n # likely netcdf file\n pass\n else:\n f.seek(0)\n doc = \"\".join(f.readlines())\n\n if doc is not None:\n xml_doc = ET.fromstring(str(doc))\n if xml_doc.tag == \"{http://www.opengis.net/sos/1.0}Capabilities\":\n ds = SensorObservationService(ds_str, xml=str(doc))\n\n elif xml_doc.tag == \"{http://www.opengis.net/sensorML/1.0.1}SensorML\":\n ds = SensorML(xml_doc)\n else:\n raise Exception(\"Unrecognized XML root element: %s\" % xml_doc.tag)\n else:\n # no doc? 
try the dataset constructor\n ds = Dataset(ds_str)\n\n return ds\n\n def scores(self, raw_scores):\n \"\"\"\n Transforms raw scores from a single checker into a fully tallied and grouped scoreline.\n \"\"\"\n grouped = self._group_raw(raw_scores)\n\n return (grouped)\n\n def _group_raw(self, raw_scores, cur=None, level=1):\n \"\"\"\n Internal recursive method to group raw scores into a cascading score summary.\n\n Only top level items are tallied for scores.\n \"\"\"\n\n def build_group(label=None, weight=None, value=None, sub=None):\n label = label\n weight = weight\n value = self._translate_value(value)\n sub = sub or []\n\n return Result(weight=weight,\n value=value,\n name=label,\n children=sub)\n\n def trim_groups(r):\n if isinstance(r.name, tuple) or isinstance(r.name, list):\n new_name = r.name[1:]\n else:\n new_name = []\n\n return Result(r.weight, r.value, new_name, r.msgs)\n\n # CHECK FOR TERMINAL CONDITION: all raw_scores.name are single length\n # @TODO could have a problem here with scalar name, but probably still works\n terminal = [len(x.name) for x in raw_scores]\n if terminal == [0] * len(raw_scores):\n return []\n\n def group_func(r):\n \"\"\"\n Slices off first element (if list/tuple) of classification or just returns it if scalar.\n \"\"\"\n if isinstance(r.name, tuple) or isinstance(r.name, list):\n if len(r.name) == 0:\n retval = ''\n else:\n retval = r.name[0:1][0]\n else:\n retval = r.name\n return retval\n\n grouped = itertools.groupby(sorted(raw_scores, key=group_func),\n key=group_func)\n\n ret_val = []\n\n for k, v in grouped:\n\n v = list(v)\n\n cv = self._group_raw(list(map(trim_groups, v)), k, level + 1)\n if len(cv):\n # if this node has children, max weight of children + sum of all the scores\n max_weight = max([x.weight for x in cv])\n sum_scores = tuple(map(sum, list(zip(*([x.value for x in cv])))))\n msgs = []\n else:\n max_weight = max([x.weight for x in v])\n sum_scores = tuple(map(sum, list(zip(*([self._translate_value(x.value) for x in v])))))\n msgs = sum([x.msgs for x in v], [])\n\n ret_val.append(Result(name=k, weight=max_weight, value=sum_scores, children=cv, msgs=msgs))\n\n return ret_val\n\n def _translate_value(self, val):\n \"\"\"\n Turns shorthand True/False/None checks into full scores (1, 1)/(0, 1)/(0, 0).\n Leaves full scores alone.\n \"\"\"\n if val is True:\n return (1, 1)\n elif val is False:\n return (0, 1)\n elif val is None:\n return (0, 0)\n\n return val\n", "path": "compliance_checker/suite.py" } ]
[ { "content": "\"\"\"\nCompliance Checker suite runner\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nimport inspect\nimport itertools\nimport json\nfrom netCDF4 import Dataset\nfrom lxml import etree as ET\nfrom compliance_checker.base import fix_return_value, Result\nfrom owslib.sos import SensorObservationService\nfrom owslib.swe.sensor.sml import SensorML\ntry:\n from urlparse import urlparse\nexcept:\n from urllib.parse import urlparse\nfrom datetime import datetime\nimport requests\nimport textwrap\n\n\nclass CheckSuite(object):\n\n checkers = {} # Base dict of checker names to BaseCheck derived types, override this in your CheckSuite implementation\n\n @classmethod\n def load_all_available_checkers(cls):\n \"\"\"\n Helper method to retrieve all sub checker classes derived from various\n base classes.\n \"\"\"\n from pkg_resources import working_set\n for x in working_set.iter_entry_points('compliance_checker.suites'):\n try:\n xl = x.load()\n\n cls.checkers[xl.name] = xl\n except Exception as e:\n print(\"Could not load\", x, \":\", e, file=sys.stderr)\n\n def _get_checks(self, checkclass):\n \"\"\"\n Helper method to retreive check methods from a Checker class.\n\n The name of the methods in the Checker class should start with \"check_\" for this\n method to find them.\n \"\"\"\n meths = inspect.getmembers(checkclass, inspect.ismethod)\n return [x[1] for x in meths if x[0].startswith(\"check_\")]\n\n def _run_check(self, check_method, ds):\n val = check_method(ds)\n\n if isinstance(val, list):\n return [fix_return_value(v, check_method.__func__.__name__, check_method, check_method.__self__) for v in val]\n\n return [fix_return_value(val, check_method.__func__.__name__, check_method, check_method.__self__)]\n\n def _get_valid_checkers(self, ds, checker_names):\n \"\"\"\n Returns a filtered list of 2-tuples: (name, valid checker) based on the ds object's type and\n the user selected names.\n \"\"\"\n if len(checker_names) == 0:\n checker_names = list(self.checkers.keys())\n\n args = [(name, self.checkers[name]) for name in checker_names if name in self.checkers]\n valid = []\n\n all_checked = set([a[1] for a in args]) # only class types\n checker_queue = set(args)\n\n while len(checker_queue):\n name, a = checker_queue.pop()\n if type(ds) in a().supported_ds:\n valid.append((name, a))\n\n # add all to queue\n for subc in a.__subclasses__():\n if subc not in all_checked:\n all_checked.add(subc)\n checker_queue.add((name, subc))\n\n return valid\n\n def run(self, ds, *checker_names):\n \"\"\"\n Runs this CheckSuite on the dataset with all the passed Checker instances.\n\n Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.\n \"\"\"\n\n ret_val = {}\n checkers = self._get_valid_checkers(ds, checker_names)\n\n if len(checkers) == 0:\n print(\"No valid checkers found for tests '%s'\" % \",\".join(checker_names))\n\n for checker_name, checker_class in checkers:\n\n checker = checker_class()\n checker.setup(ds)\n\n checks = self._get_checks(checker)\n vals = []\n errs = {} # check method name -> (exc, traceback)\n\n for c in checks:\n try:\n vals.extend(self._run_check(c, ds))\n except Exception as e:\n errs[c.__func__.__name__] = (e, sys.exc_info()[2])\n\n # score the results we got back\n groups = self.scores(vals)\n\n ret_val[checker_name] = groups, errs\n\n return ret_val\n\n @classmethod\n def passtree(cls, groups, limit):\n for r in groups:\n if 
r.children:\n x = cls.passtree(r.children, limit)\n if r.weight >= limit and x is False:\n return False\n\n if r.weight >= limit and r.value[0] != r.value[1]:\n return False\n\n return True\n\n def build_structure(self, check_name, groups, source_name, limit=1):\n '''\n Compiles the checks, results and scores into an aggregate structure which looks like:\n\n {\n \"scored_points\": 396,\n \"low_count\": 0,\n \"possible_points\": 400,\n \"testname\": \"gliderdac\",\n \"medium_count\": 2,\n \"source_name\": \".//rutgers/ru01-20140120T1444/ru01-20140120T1649.nc\",\n \"high_count\": 0,\n \"all_priorities\" : [...],\n \"high_priorities\": [...],\n \"medium_priorities\" : [...],\n \"low_priorities\" : [...]\n }\n\n @param check_name The test which was run\n @param groups List of results from compliance checker\n @param source_name Source of the dataset, used for title\n '''\n aggregates = {}\n\n aggregates['scored_points'] = 0\n aggregates['possible_points'] = 0\n high_priorities = []\n medium_priorities = []\n low_priorities = []\n all_priorities = []\n\n aggregates['high_count'] = 0\n aggregates['medium_count'] = 0\n aggregates['low_count'] = 0\n\n def named_function(result):\n for child in result.children:\n all_priorities.append(child)\n named_function(child)\n\n # For each result, bin them into the appropriate category, put them all\n # into the all_priorities category and add up the point values\n for res in groups:\n if res.weight < limit:\n continue\n # If the result has 0 possible points, then it was not valid for\n # this dataset and contains no meaningful information\n if res.value[1] == 0:\n continue\n aggregates['scored_points'] += res.value[0]\n aggregates['possible_points'] += res.value[1]\n if res.weight == 3:\n high_priorities.append(res)\n if res.value[0] < res.value[1]:\n aggregates['high_count'] += 1\n elif res.weight == 2:\n medium_priorities.append(res)\n if res.value[0] < res.value[1]:\n aggregates['medium_count'] += 1\n else:\n low_priorities.append(res)\n if res.value[0] < res.value[1]:\n aggregates['low_count'] += 1\n all_priorities.append(res)\n # Some results have children\n # We don't render children inline with the top three tables, but we\n # do total the points and display the messages\n named_function(res)\n\n aggregates['high_priorities'] = high_priorities\n aggregates['medium_priorities'] = medium_priorities\n aggregates['low_priorities'] = low_priorities\n aggregates['all_priorities'] = all_priorities\n aggregates['testname'] = check_name\n aggregates['source_name'] = source_name\n return aggregates\n\n def json_output(self, check_name, groups, file_object, source_name, limit):\n '''\n Builds the results into a JSON structure and writes it to the file buffer.\n\n @param check_name The test which was run\n @param groups List of results from compliance checker\n @param output_filename Path to file to save output\n @param file_object A python file object where the output should be written to\n @param source_name Source of the dataset, used for title\n @param limit Integer value for limiting output\n '''\n aggregates = self.build_structure(check_name, groups, source_name, limit)\n aggregates = self.serialize(aggregates)\n json_string = json.dumps(aggregates, ensure_ascii=False)\n file_object.write(str(json_string))\n return\n\n def serialize(self, o):\n '''\n Returns a safe serializable object that can be serialized into JSON.\n\n @param o Python object to serialize\n '''\n if isinstance(o, (list, tuple)):\n return [self.serialize(i) for i in o]\n if isinstance(o, 
dict):\n return {k: self.serialize(v) for k, v in o.items()}\n if isinstance(o, datetime):\n return o.isoformat()\n if isinstance(o, Result):\n return self.serialize(o.serialize())\n return o\n\n def html_output(self, check_name, groups, file_object, source_name, limit):\n '''\n Renders an HTML file using Jinja2 and saves the output to the file specified.\n\n @param check_name The test which was run\n @param groups List of results from compliance checker\n @param output_filename Path to file to save output\n @param file_object A python file object where the output should be written to\n @param source_name Source of the dataset, used for title\n @param limit Integer value for limiting output\n '''\n from jinja2 import Environment, PackageLoader\n self.j2 = Environment(loader=PackageLoader('compliance_checker', 'data/templates'))\n template = self.j2.get_template('ccheck.html.j2')\n\n template_vars = self.build_structure(check_name, groups, source_name, limit)\n\n buf = template.render(**template_vars)\n\n file_object.write(buf)\n\n def get_points(self, groups, limit):\n score_list = []\n score_only_list = []\n\n for v in range(len(groups)):\n score_list.append([groups[v].name, groups[v].weight, groups[v].value,\n groups[v].children])\n if groups[v].weight >= limit:\n score_only_list.append(groups[v].value)\n\n points = [x[0] for x in score_only_list]\n out_of = [x[1] for x in score_only_list]\n\n points = sum(points)\n out_of = sum(out_of)\n # sorts lists into high/medium/low order\n score_list.sort(key=lambda x: x[1], reverse=True)\n\n return score_list, points, out_of\n\n def standard_output(self, limit, check_name, groups):\n \"\"\"\n Generates the Terminal Output for Standard cases\n\n Returns the dataset needed for the verbose output, as well as the failure flags.\n \"\"\"\n score_list, points, out_of = self.get_points(groups, limit)\n print('\\n')\n print(\"-\" * 80)\n print('{:^80}'.format(\"The dataset scored %r out of %r points\" % (points, out_of)))\n print('{:^80}'.format(\"during the %s check\" % check_name))\n print(\"-\" * 80)\n\n return [score_list, points, out_of]\n\n def non_verbose_output_generation(self, score_list, groups, limit, points, out_of):\n\n if points < out_of:\n print('{:^80}'.format(\"Scoring Breakdown:\"))\n print('\\n')\n priority_flag = 3\n for x in range(len(score_list)):\n if score_list[x][1] == 3 and limit <= 3 :\n if priority_flag == 3:\n print('{:^80}'.format(\"High Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n priority_flag -= 1\n print('%-40s:%s:%6s/%1s' % (score_list[x][0][0:39], score_list[x][1], score_list[x][2][0], score_list[x][2][1]))\n\n elif score_list[x][1] == 2 and limit <= 2 :\n if priority_flag == 2:\n print('\\n')\n print('{:^80}'.format(\"Medium Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n priority_flag -= 1\n print('%-40s:%s:%6s/%1s' % (score_list[x][0][0:39], score_list[x][1], score_list[x][2][0], score_list[x][2][1]))\n\n elif score_list[x][1] == 1 and limit == 1 :\n if priority_flag == 1:\n print('\\n')\n print('{:^80}'.format(\"Low Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n priority_flag -= 1\n print('%-40s:%s:%6s/%1s' % (score_list[x][0][0:39], score_list[x][1], score_list[x][2][0], score_list[x][2][1]))\n\n elif score_list[x][1] == 1 and limit == 1 and priority_flag == 2:\n print('{:^80}'.format('No medium priority tests present'))\n print('-' * 80)\n priority_flag -= 1\n # Catch All for 
pretty presentation\n if priority_flag == 2 and limit == 2:\n print('{:^80}'.format('No Medium priority tests present'))\n print('-' * 80)\n\n if priority_flag == 2 and limit == 1:\n print('{:^80}'.format('No Medium priority tests present'))\n print('-' * 80)\n print('')\n print('{:^80}'.format('No Low priority tests present'))\n print('-' * 80)\n\n if priority_flag == 1 and limit == 1:\n print('{:^80}'.format('No Low priority tests present'))\n print('-' * 80)\n\n print(\"\\n\" + \"\\n\" + '-' * 80)\n print('{:^80}'.format('Reasoning for the failed tests given below:'))\n print('\\n')\n print('%s%37s:%10s:%8s' % ('Name', 'Priority', ' Score', 'Reasoning'))\n print(\"-\" * 80)\n self.reasoning_routine(groups, 0)\n\n else:\n print(\"All tests passed!\")\n\n def verbose_output_generation(self, groups, limit, points, out_of):\n '''\n Generates the Terminal Output for Verbose cases\n '''\n priority_flag = 3\n print('{:^80}'.format(\"Verbose Scoring Breakdown:\"), end=' ')\n self.print_routine(groups, 0, priority_flag)\n if points < out_of:\n print(\"\\n\" + \"\\n\" + '-' * 80)\n print('{:^80}'.format('Reasoning for the failed tests given below:'))\n print('\\n')\n print('%s%37s:%10s:%8s' % ('Name', 'Priority', ' Score', 'Reasoning'))\n print(\"-\" * 80)\n self.reasoning_routine(groups, 0)\n\n pass\n\n def print_routine(self, list_of_results, indent, priority_flag):\n \"\"\"\n print routine performed\n \"\"\"\n def weight_func(r):\n \"\"\"\n Function that returns the weight, used for sorting by priority\n \"\"\"\n return r.weight\n\n # Sorting method used to properly sort the output by priority.\n grouped_sorted = []\n grouped_sorted = sorted(list_of_results, key=weight_func, reverse=True)\n\n # Loop over input\n for res in grouped_sorted:\n # If statements to print the proper Headings\n if res.weight == 3 and indent == 0 and priority_flag == 3:\n print('\\n')\n print('{:^80}'.format(\"High Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n\n priority_flag -= 1\n if res.weight == 2 and indent == 0 and priority_flag == 2:\n print('\\n')\n print('{:^80}'.format(\"Medium Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n\n priority_flag -= 1\n if res.weight == 1 and indent == 0 and priority_flag == 1:\n print('\\n')\n print('{:^80}'.format(\"Low Priority\"))\n print(\"-\" * 80)\n print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))\n priority_flag -= 1\n\n print('%-40s:%s:%s%6s/%1s' % ((indent * ' ' + res.name)[0:39], res.weight, indent * ' ', res.value[0], res.value[1]))\n if res.children:\n self.print_routine(res.children, indent + 1, priority_flag)\n\n def reasoning_routine(self, list_of_results, indent, line = True):\n \"\"\"\n print routine performed\n \"\"\"\n def weight_func(r):\n \"\"\"\n Function that returns the weight, used for sorting by priority\n \"\"\"\n return r.weight\n\n # Sorting method used to properly sort the output by priority.\n grouped_sorted = []\n grouped_sorted = sorted(list_of_results, key=weight_func, reverse=True)\n\n wrapper = textwrap.TextWrapper(initial_indent = '', width = 80, subsequent_indent = ' ' * 54)\n for res in grouped_sorted:\n if (res.value[0] != res.value[1]) and not res.msgs:\n print('%-39s:%1s:%6s/%2s : %s' % ((indent * ' ' + res.name)[0:39], res.weight, res.value[0], res.value[1], ' '))\n\n if (res.value[0] != res.value[1]) and res.msgs:\n print(wrapper.fill('%-39s:%1s:%6s/%2s : %s' % ((indent * ' ' + res.name)[0:39], res.weight, res.value[0], res.value[1], \", 
\".join(res.msgs))))\n\n if res.children:\n self.reasoning_routine(res.children, indent + 1, False)\n\n def load_dataset(self, ds_str):\n \"\"\"\n Helper method to load a dataset or SOS GC/DS url.\n \"\"\"\n ds = None\n\n # try to figure out if this is a local NetCDF Dataset, a remote one, or an SOS GC/DS url\n doc = None\n pr = urlparse(ds_str)\n if pr.netloc: # looks like a remote url\n rhead = requests.head(ds_str)\n\n # if we get a 400 here, it's likely a Dataset openable OpenDAP url\n if rhead.status_code == 400:\n pass\n elif rhead.status_code == 200 and rhead.headers['content-type'] == 'text/xml':\n # probably interesting, grab it\n r = requests.get(ds_str)\n r.raise_for_status()\n\n doc = r.text\n else:\n raise Exception(\"Could not understand response code %s and content-type %s\" % (rhead.status_code, rhead.headers.get('content-type', 'none')))\n else:\n def is_binary_string(bts):\n # do a cheap imitation of libmagic\n # http://stackoverflow.com/a/7392391/84732\n if sys.version_info >= (3, ):\n join_str = ''\n textchars = join_str.join(map(chr, [7, 8, 9, 10, 12, 13, 27] + list(range(0x20, 0x100)))).encode()\n #textchars = textchars.encode()\n else:\n # because of `unicode_literals` import, we need to convert\n # to a Py2 string/bytes\n join_str = str('')\n textchars = join_str.join(map(chr, [7, 8, 9, 10, 12, 13, 27] + list(range(0x20, 0x100))))\n return bool(bts.translate(None, textchars))\n\n with open(ds_str, 'rb') as f:\n first_chunk = f.read(1024)\n if is_binary_string(first_chunk):\n # likely netcdf file\n pass\n else:\n f.seek(0)\n doc = \"\".join(f.readlines())\n\n if doc is not None:\n xml_doc = ET.fromstring(str(doc))\n if xml_doc.tag == \"{http://www.opengis.net/sos/1.0}Capabilities\":\n ds = SensorObservationService(ds_str, xml=str(doc))\n\n elif xml_doc.tag == \"{http://www.opengis.net/sensorML/1.0.1}SensorML\":\n ds = SensorML(xml_doc)\n else:\n raise Exception(\"Unrecognized XML root element: %s\" % xml_doc.tag)\n else:\n # no doc? 
try the dataset constructor\n ds = Dataset(ds_str)\n\n return ds\n\n def scores(self, raw_scores):\n \"\"\"\n Transforms raw scores from a single checker into a fully tallied and grouped scoreline.\n \"\"\"\n grouped = self._group_raw(raw_scores)\n\n return (grouped)\n\n def _group_raw(self, raw_scores, cur=None, level=1):\n \"\"\"\n Internal recursive method to group raw scores into a cascading score summary.\n\n Only top level items are tallied for scores.\n \"\"\"\n\n def build_group(label=None, weight=None, value=None, sub=None):\n label = label\n weight = weight\n value = self._translate_value(value)\n sub = sub or []\n\n return Result(weight=weight,\n value=value,\n name=label,\n children=sub)\n\n def trim_groups(r):\n if isinstance(r.name, tuple) or isinstance(r.name, list):\n new_name = r.name[1:]\n else:\n new_name = []\n\n return Result(r.weight, r.value, new_name, r.msgs)\n\n # CHECK FOR TERMINAL CONDITION: all raw_scores.name are single length\n # @TODO could have a problem here with scalar name, but probably still works\n terminal = [len(x.name) for x in raw_scores]\n if terminal == [0] * len(raw_scores):\n return []\n\n def group_func(r):\n \"\"\"\n Slices off first element (if list/tuple) of classification or just returns it if scalar.\n \"\"\"\n if isinstance(r.name, tuple) or isinstance(r.name, list):\n if len(r.name) == 0:\n retval = ''\n else:\n retval = r.name[0:1][0]\n else:\n retval = r.name\n return retval\n\n grouped = itertools.groupby(sorted(raw_scores, key=group_func),\n key=group_func)\n\n ret_val = []\n\n for k, v in grouped:\n\n v = list(v)\n\n cv = self._group_raw(list(map(trim_groups, v)), k, level + 1)\n if len(cv):\n # if this node has children, max weight of children + sum of all the scores\n max_weight = max([x.weight for x in cv])\n sum_scores = tuple(map(sum, list(zip(*([x.value for x in cv])))))\n msgs = []\n else:\n max_weight = max([x.weight for x in v])\n sum_scores = tuple(map(sum, list(zip(*([self._translate_value(x.value) for x in v])))))\n msgs = sum([x.msgs for x in v], [])\n\n ret_val.append(Result(name=k, weight=max_weight, value=sum_scores, children=cv, msgs=msgs))\n\n return ret_val\n\n def _translate_value(self, val):\n \"\"\"\n Turns shorthand True/False/None checks into full scores (1, 1)/(0, 1)/(0, 0).\n Leaves full scores alone.\n \"\"\"\n if val is True:\n return (1, 1)\n elif val is False:\n return (0, 1)\n elif val is None:\n return (0, 0)\n\n return val\n", "path": "compliance_checker/suite.py" } ]
diff --git a/compliance_checker/suite.py b/compliance_checker/suite.py index e2f683ef..bdb57921 100644 --- a/compliance_checker/suite.py +++ b/compliance_checker/suite.py @@ -280,6 +280,8 @@ def get_points(self, groups, limit): points = sum(points) out_of = sum(out_of) + # sorts lists into high/medium/low order + score_list.sort(key=lambda x: x[1], reverse=True) return score_list, points, out_of
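The two added lines in this patch sort `score_list` by its weight column so that results print in high/medium/low priority order. A minimal, self-contained sketch of that sort, using made-up entries shaped like the `[name, weight, value, children]` rows built in `get_points()` (the sample data is illustrative only, not from the dataset):

```python
# Illustrative rows shaped like score_list in get_points():
# [name, weight, (scored, possible), children]
score_list = [
    ["some_low_priority_check", 1, (1, 1), []],
    ["some_high_priority_check", 3, (2, 3), []],
    ["some_medium_priority_check", 2, (0, 1), []],
]

# The sort the patch adds: descending weight -> high/medium/low order
score_list.sort(key=lambda x: x[1], reverse=True)

print([row[0] for row in score_list])
# ['some_high_priority_check', 'some_medium_priority_check', 'some_low_priority_check']
```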
e2nIEE__pandapower-136
3W transformer equivalent can have zero impedance
For three-winding transformers with specific parameters it is possible to get equivalent transformers with zero impedance. While this is probably due to bad data, pandapower currently does not check for this, and the power flow does not converge, giving only the warning

RuntimeWarning: divide by zero encountered in true_divide
Ysf = stat / (branch[:, BR_R] + 1j * branch[:, BR_X])  ## series admittance

While this is a clear hint at the problem, finding the faulty element is not very straightforward. I suggest at least a check in `_trafo_df_from_trafo3w()` like the following:
```
if any(trafo_df.vsc_percent == 0):
    raise UserWarning("Equivalent Trafo with zero impedance!")
```
One could also add a small impedance to let the power flow continue...
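A minimal sketch of how the suggested check could be wired in at the end of `_trafo_df_from_trafo3w()` (the function shown in `pandapower/build_branch.py` below). The helper name, the tolerance handling via `np.isclose`, and the message wording are assumptions for illustration, not part of the issue or the existing code:

```python
import numpy as np
import pandas as pd  # trafo_df is the DataFrame built from the trafos2w dict

def _check_3w_equivalent_impedance(trafo_df):
    """Hypothetical helper: raise if any equivalent 2W transformer derived
    from a 3W transformer ends up with (near-)zero short-circuit impedance."""
    zero_z = np.isclose(trafo_df["vsc_percent"].values.astype(float), 0.0)
    if zero_z.any():
        raise UserWarning("Equivalent trafo(s) with zero impedance: %s"
                          % trafo_df.index[zero_z].tolist())

# Sketch of usage right before the existing `return trafo_df`:
# trafo_df = pd.DataFrame.from_dict(trafos2w, orient="index")
# _check_3w_equivalent_impedance(trafo_df)
# return trafo_df
```

Alternatively, as the issue suggests, a small minimum impedance could be substituted instead of raising, so that the power flow can continue.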
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.\n\nimport copy\nimport math\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandapower.auxiliary import get_values\nfrom pandapower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A, \\\n BR_R_ASYM, BR_X_ASYM, branch_cols\nfrom pandapower.idx_bus import BASE_KV, VM, VA\n\n\ndef _build_branch_ppc(net, ppc):\n \"\"\"\n Takes the empty ppc network and fills it with the branch values. The branch\n datatype will be np.complex 128 afterwards.\n\n .. note:: The order of branches in the ppc is:\n 1. Lines\n 2. Transformers\n 3. 3W Transformers (each 3W Transformer takes up three branches)\n 4. Impedances\n 5. Internal branch for extended ward\n\n **INPUT**:\n **net** -The pandapower format network\n\n **ppc** - The PYPOWER format network to fill in values\n\n \"\"\"\n length = _initialize_branch_lookup(net)\n lookup = net._pd2ppc_lookups[\"branch\"]\n mode = net._options[\"mode\"]\n ppc[\"branch\"] = np.zeros(shape=(length, branch_cols), dtype=np.complex128)\n if mode == \"sc\":\n from pandapower.shortcircuit.idx_brch import branch_cols_sc\n branch_sc = np.empty(shape=(length, branch_cols_sc), dtype=float)\n branch_sc.fill(np.nan)\n ppc[\"branch\"] = np.hstack((ppc[\"branch\"], branch_sc))\n ppc[\"branch\"][:, :13] = np.array([0, 0, 0, 0, 0, 250, 250, 250, 1, 0, 1, -360, 360])\n if \"line\" in lookup:\n f, t = lookup[\"line\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_B,\n BR_STATUS, RATE_A]] = _calc_line_parameter(net, ppc)\n if \"trafo\" in lookup:\n f, t = lookup[\"trafo\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS,\n RATE_A]] = _calc_trafo_parameter(net, ppc)\n if \"trafo3w\" in lookup:\n f, t = lookup[\"trafo3w\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A]] = \\\n _calc_trafo3w_parameter(net, ppc)\n if \"impedance\" in lookup:\n f, t = lookup[\"impedance\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_R_ASYM, BR_X_ASYM, BR_STATUS]] = \\\n _calc_impedance_parameter(net)\n if \"xward\" in lookup:\n f, t = lookup[\"xward\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_STATUS]] = _calc_xward_parameter(net, ppc)\n\n if \"switch\" in lookup:\n f, t = lookup[\"switch\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R]] = _calc_switch_parameter(net, ppc)\n\n\ndef _initialize_branch_lookup(net):\n r_switch = net[\"_options\"][\"r_switch\"]\n start = 0\n end = 0\n net._pd2ppc_lookups[\"branch\"] = {}\n for element in [\"line\", \"trafo\", \"trafo3w\", \"impedance\", \"xward\"]:\n if len(net[element]) > 0:\n if element == \"trafo3w\":\n end = start + len(net[element]) * 3\n else:\n end = start + len(net[element])\n net._pd2ppc_lookups[\"branch\"][element] = (start, end)\n start = end\n if r_switch > 0 and len(net._closed_bb_switches) > 0:\n end = start + net._closed_bb_switches.sum()\n net._pd2ppc_lookups[\"branch\"][\"switch\"] = (start, end)\n return end\n\n\ndef _calc_trafo3w_parameter(net, ppc):\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n trafo_df = _trafo_df_from_trafo3w(net)\n net._equiv_trafo3w = trafo_df\n\n temp_para = np.zeros(shape=(len(trafo_df), 
9), dtype=np.complex128)\n temp_para[:, 0] = bus_lookup[(trafo_df[\"hv_bus\"].values).astype(int)]\n temp_para[:, 1] = bus_lookup[(trafo_df[\"lv_bus\"].values).astype(int)]\n temp_para[:, 2:7] = _calc_branch_values_from_trafo_df(net, ppc, trafo_df)\n temp_para[:, 7] = trafo_df[\"in_service\"].values\n if copy_constraints_to_ppc:\n max_load = trafo_df.max_loading_percent if \"max_loading_percent\" in trafo_df else 0\n temp_para[:, 8] = max_load / 100. * trafo_df.sn_kva / 1000.\n return temp_para\n\n\ndef _calc_line_parameter(net, ppc):\n \"\"\"\n calculates the line parameter in per unit.\n\n **INPUT**:\n **net** -The pandapower format network\n\n **RETURN**:\n **t** - Temporary line parameter. Which is a complex128\n Nunmpy array. with the following order:\n 0:bus_a; 1:bus_b; 2:r_pu; 3:x_pu; 4:b_pu\n \"\"\"\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\n mode = net[\"_options\"][\"mode\"]\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n line = net[\"line\"]\n fb = bus_lookup[line[\"from_bus\"].values]\n tb = bus_lookup[line[\"to_bus\"].values]\n length = line[\"length_km\"].values\n parallel = line[\"parallel\"].values\n baseR = np.square(ppc[\"bus\"][fb, BASE_KV]) / net.sn_kva * 1e3\n t = np.zeros(shape=(len(line.index), 7), dtype=np.complex128)\n\n t[:, 0] = fb\n t[:, 1] = tb\n\n t[:, 2] = line[\"r_ohm_per_km\"].values * length / baseR / parallel\n t[:, 3] = line[\"x_ohm_per_km\"].values * length / baseR / parallel\n if mode == \"sc\":\n if net[\"_options\"][\"case\"] == \"min\":\n t[:, 2] *= _end_temperature_correction_factor(net)\n else:\n b = (2 * net.f_hz * math.pi * line[\"c_nf_per_km\"].values * 1e-9 * baseR *\n length * parallel)\n g = line[\"g_us_per_km\"].values * 1e-6 * baseR * length * parallel\n t[:, 4] = b - g * 1j\n t[:, 5] = line[\"in_service\"].values\n if copy_constraints_to_ppc:\n max_load = line.max_loading_percent.values if \"max_loading_percent\" in line else 0\n vr = net.bus.vn_kv.loc[line[\"from_bus\"].values].values * np.sqrt(3)\n t[:, 6] = max_load / 100. * line.max_i_ka.values * line.df.values * parallel * vr\n return t\n\n\ndef _calc_trafo_parameter(net, ppc):\n '''\n Calculates the transformer parameter in per unit.\n\n **INPUT**:\n **net** - The pandapower format network\n\n **RETURN**:\n **temp_para** -\n Temporary transformer parameter. Which is a np.complex128\n Numpy array. with the following order:\n 0:hv_bus; 1:lv_bus; 2:r_pu; 3:x_pu; 4:b_pu; 5:tab, 6:shift\n '''\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\n\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n temp_para = np.zeros(shape=(len(net[\"trafo\"].index), 9), dtype=np.complex128)\n trafo = net[\"trafo\"]\n parallel = trafo[\"parallel\"].values\n temp_para[:, 0] = bus_lookup[trafo[\"hv_bus\"].values]\n temp_para[:, 1] = bus_lookup[trafo[\"lv_bus\"].values]\n temp_para[:, 2:7] = _calc_branch_values_from_trafo_df(net, ppc)\n temp_para[:, 7] = trafo[\"in_service\"].values\n if any(trafo.df.values <= 0):\n raise UserWarning(\"Rating factor df must be positive. Transformers with false \"\n \"rating factors: %s\" % trafo.query('df<=0').index.tolist())\n if copy_constraints_to_ppc:\n max_load = trafo.max_loading_percent.values if \"max_loading_percent\" in trafo else 0\n temp_para[:, 8] = max_load / 100. * trafo.sn_kva.values / 1000. 
* trafo.df.values * parallel\n return temp_para\n\n\ndef _calc_branch_values_from_trafo_df(net, ppc, trafo_df=None):\n \"\"\"\n Calculates the MAT/PYPOWER-branch-attributes from the pandapower trafo dataframe.\n\n PYPOWER and MATPOWER uses the PI-model to model transformers.\n This function calculates the resistance r, reactance x, complex susceptance c and the tap ratio\n according to the given parameters.\n\n .. warning:: This function returns the subsceptance b as a complex number\n **(-img + -re*i)**. MAT/PYPOWER is only intended to calculate the\n imaginary part of the subceptance. However, internally c is\n multiplied by i. By using subsceptance in this way, it is possible\n to consider the ferromagnetic loss of the coil. Which would\n otherwise be neglected.\n\n\n .. warning:: Tab switches effect calculation as following:\n On **high-voltage** side(=1) -> only **tab** gets adapted.\n On **low-voltage** side(=2) -> **tab, x, r** get adapted.\n This is consistent with Sincal.\n The Sincal method in this case is questionable.\n\n\n **INPUT**:\n **pd_trafo** - The pandapower format Transformer Dataframe.\n The Transformer modell will only readfrom pd_net\n\n **RETURN**:\n **temp_para** - Temporary transformer parameter. Which is a complex128\n Nunmpy array. with the following order:\n 0:r_pu; 1:x_pu; 2:b_pu; 3:tab;\n\n \"\"\"\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n if trafo_df is None:\n trafo_df = net[\"trafo\"]\n parallel = trafo_df[\"parallel\"].values\n vn_lv = get_values(ppc[\"bus\"][:, BASE_KV], trafo_df[\"lv_bus\"].values, bus_lookup)\n ### Construct np.array to parse results in ###\n # 0:r_pu; 1:x_pu; 2:b_pu; 3:tab;\n temp_para = np.zeros(shape=(len(trafo_df), 5), dtype=np.complex128)\n vn_trafo_hv, vn_trafo_lv, shift = _calc_tap_from_dataframe(net, trafo_df)\n ratio = _calc_nominal_ratio_from_dataframe(ppc, trafo_df, vn_trafo_hv, vn_trafo_lv,\n bus_lookup)\n r, x, y = _calc_r_x_y_from_dataframe(net, trafo_df, vn_trafo_lv, vn_lv, net.sn_kva)\n temp_para[:, 0] = r / parallel\n temp_para[:, 1] = x / parallel\n temp_para[:, 2] = y * parallel\n temp_para[:, 3] = ratio\n temp_para[:, 4] = shift\n return temp_para\n\n\ndef _calc_r_x_y_from_dataframe(net, trafo_df, vn_trafo_lv, vn_lv, sn_kva):\n mode = net[\"_options\"][\"mode\"]\n trafo_model = net[\"_options\"][\"trafo_model\"]\n\n r, x = _calc_r_x_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_kva)\n if mode == \"sc\":\n y = 0\n if trafo_df.equals(net.trafo):\n from pandapower.shortcircuit.idx_bus import C_MAX\n bus_lookup = net._pd2ppc_lookups[\"bus\"]\n cmax = net._ppc[\"bus\"][bus_lookup[net.trafo.lv_bus.values], C_MAX]\n kt = _transformer_correction_factor(trafo_df.vsc_percent, trafo_df.vscr_percent,\n trafo_df.sn_kva, cmax)\n r *= kt\n x *= kt\n else:\n y = _calc_y_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_kva)\n if trafo_model == \"pi\":\n return r, x, y\n elif trafo_model == \"t\":\n return _wye_delta(r, x, y)\n else:\n raise ValueError(\"Unkonwn Transformer Model %s - valid values ar 'pi' or 't'\" % trafo_model)\n\n\ndef _wye_delta(r, x, y):\n \"\"\"\n 20.05.2016 added by Lothar Löwer\n\n Calculate transformer Pi-Data based on T-Data\n\n \"\"\"\n tidx = np.where(y != 0)\n za_star = (r[tidx] + x[tidx] * 1j) / 2\n zc_star = -1j / y[tidx]\n zSum_triangle = za_star * za_star + 2 * za_star * zc_star\n zab_triangle = zSum_triangle / zc_star\n zbc_triangle = zSum_triangle / za_star\n r[tidx] = zab_triangle.real\n x[tidx] = zab_triangle.imag\n y[tidx] = -2j / zbc_triangle\n return r, x, y\n\n\ndef 
_calc_y_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_kva):\n \"\"\"\n Calculate the subsceptance y from the transformer dataframe.\n\n INPUT:\n\n **trafo** (Dataframe) - The dataframe in net.trafo\n which contains transformer calculation values.\n\n OUTPUT:\n **subsceptance** (1d array, np.complex128) - The subsceptance in pu in\n the form (-b_img, -b_real)\n \"\"\"\n baseR = np.square(vn_lv) / sn_kva * 1e3\n\n ### Calculate subsceptance ###\n vnl_squared = trafo_df[\"vn_lv_kv\"].values ** 2\n b_real = trafo_df[\"pfe_kw\"].values / (1000. * vnl_squared) * baseR\n i0 = trafo_df[\"i0_percent\"].values\n pfe = trafo_df[\"pfe_kw\"].values\n sn = trafo_df[\"sn_kva\"].values\n b_img = (i0 / 100. * sn / 1000.) ** 2 - (pfe / 1000.) ** 2\n\n b_img[b_img < 0] = 0\n b_img = np.sqrt(b_img) * baseR / vnl_squared\n y = - b_real * 1j - b_img * np.sign(i0)\n if \"lv\" in trafo_df[\"tp_side\"].values:\n return y / np.square(vn_trafo_lv / trafo_df[\"vn_lv_kv\"].values)\n else:\n return y\n\n\ndef _calc_tap_from_dataframe(net, trafo_df):\n \"\"\"\n Adjust the nominal voltage vnh and vnl to the active tab position \"tp_pos\".\n If \"side\" is 1 (high-voltage side) the high voltage vnh is adjusted.\n If \"side\" is 2 (low-voltage side) the low voltage vnl is adjusted\n\n INPUT:\n **net** - The pandapower format network\n\n **trafo** (Dataframe) - The dataframe in pd_net[\"structure\"][\"trafo\"]\n which contains transformer calculation values.\n\n OUTPUT:\n **vn_hv_kv** (1d array, float) - The adusted high voltages\n\n **vn_lv_kv** (1d array, float) - The adjusted low voltages\n\n **trafo_shift** (1d array, float) - phase shift angle\n\n \"\"\"\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\n mode = net[\"_options\"][\"mode\"]\n trafo_shift = trafo_df[\"shift_degree\"].values.astype(float) if calculate_voltage_angles else \\\n np.zeros(len(trafo_df))\n vnh = copy.copy(trafo_df[\"vn_hv_kv\"].values.astype(float))\n vnl = copy.copy(trafo_df[\"vn_lv_kv\"].values.astype(float))\n if mode == \"sc\":\n return vnh, vnl, trafo_shift\n\n tp_diff = trafo_df[\"tp_pos\"].values - trafo_df[\"tp_mid\"].values\n\n cos = lambda x: np.cos(np.deg2rad(x))\n sin = lambda x: np.sin(np.deg2rad(x))\n arctan = lambda x: np.rad2deg(np.arctan(x))\n\n for side, vn, direction in [(\"hv\", vnh, 1), (\"lv\", vnl, -1)]:\n phase_shifters = trafo_df[\"tp_phase_shifter\"].values & (trafo_df[\"tp_side\"].values == side)\n tap_complex = (np.isfinite(trafo_df[\"tp_st_percent\"].values) &\n np.isfinite(trafo_df[\"tp_pos\"].values) &\n (trafo_df[\"tp_side\"].values == side) &\n ~phase_shifters)\n if np.any(tap_complex):\n tp_steps = trafo_df[\"tp_st_percent\"].values[tap_complex] * tp_diff[tap_complex] / 100\n tp_angles = np.nan_to_num(trafo_df[\"tp_st_degree\"].values[tap_complex])\n u1 = vn[tap_complex]\n du = u1 * np.nan_to_num(tp_steps)\n vn[tap_complex] = np.sqrt((u1 + du * cos(tp_angles)) ** 2 + (du * sin(tp_angles)) ** 2)\n trafo_shift[tap_complex] += (arctan(direction * du * sin(tp_angles) /\n (u1 + du * cos(tp_angles))))\n if np.any(phase_shifters):\n trafo_shift[phase_shifters] += (direction * tp_diff[phase_shifters] *\n trafo_df[\"tp_st_degree\"].values[phase_shifters])\n return vnh, vnl, trafo_shift\n\n\ndef _calc_r_x_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_kva):\n \"\"\"\n Calculates (Vectorized) the resitance and reactance according to the\n transformer values\n\n \"\"\"\n tap_lv = np.square(vn_trafo_lv / vn_lv) * sn_kva # adjust for low voltage side voltage converter\n sn_trafo_kva = 
trafo_df.sn_kva.values\n z_sc = trafo_df[\"vsc_percent\"].values / 100. / sn_trafo_kva * tap_lv\n r_sc = trafo_df[\"vscr_percent\"].values / 100. / sn_trafo_kva * tap_lv\n x_sc = np.sign(z_sc) * np.sqrt(z_sc ** 2 - r_sc ** 2)\n return r_sc, x_sc\n\n\ndef _calc_nominal_ratio_from_dataframe(ppc, trafo_df, vn_hv_kv, vn_lv_kv, bus_lookup):\n \"\"\"\n Calculates (Vectorized) the off nominal tap ratio::\n\n (vn_hv_kv / vn_lv_kv) / (ub1_in_kv / ub2_in_kv)\n\n INPUT:\n **net** (Dataframe) - The net for which to calc the tap ratio.\n\n **vn_hv_kv** (1d array, float) - The adjusted nominal high voltages\n\n **vn_lv_kv** (1d array, float) - The adjusted nominal low voltages\n\n OUTPUT:\n **tab** (1d array, float) - The off-nominal tap ratio\n \"\"\"\n # Calculating tab (trasformer off nominal turns ratio)\n tap_rat = vn_hv_kv / vn_lv_kv\n nom_rat = get_values(ppc[\"bus\"][:, BASE_KV], trafo_df[\"hv_bus\"].values, bus_lookup) / \\\n get_values(ppc[\"bus\"][:, BASE_KV], trafo_df[\"lv_bus\"].values, bus_lookup)\n return tap_rat / nom_rat\n\n\ndef z_br_to_bus(z, s):\n return s[0] * np.array([z[0] / min(s[0], s[1]), z[1] /\n min(s[1], s[2]), z[2] / min(s[0], s[2])])\n\n\ndef wye_delta(zbr_n, s):\n return .5 * s / s[0] * np.array([(zbr_n[0] + zbr_n[2] - zbr_n[1]),\n (zbr_n[1] + zbr_n[0] - zbr_n[2]),\n (zbr_n[2] + zbr_n[1] - zbr_n[0])])\n\n\ndef _trafo_df_from_trafo3w(net):\n mode = net._options[\"mode\"]\n loss_location = net._options[\"trafo3w_losses\"].lower()\n trafos2w = {}\n nr_trafos = len(net[\"trafo3w\"])\n tap_variables = (\"tp_pos\", \"tp_mid\", \"tp_max\", \"tp_min\", \"tp_st_percent\", \"tp_st_degree\")\n i = 0\n for ttab in net[\"trafo3w\"].itertuples():\n vsc = np.array([ttab.vsc_hv_percent, ttab.vsc_mv_percent, ttab.vsc_lv_percent], dtype=float)\n vscr = np.array([ttab.vscr_hv_percent, ttab.vscr_mv_percent, ttab.vscr_lv_percent], dtype=float)\n sn = np.array([ttab.sn_hv_kva, ttab.sn_mv_kva, ttab.sn_lv_kva])\n vsc_2w_delta = z_br_to_bus(vsc, sn)\n vscr_2w_delta = z_br_to_bus(vscr, sn)\n if mode == \"sc\":\n kt = _transformer_correction_factor(vsc, vscr, sn, 1.1)\n vsc_2w_delta *= kt\n vscr_2w_delta *= kt\n vsci_2w_delta = np.sqrt(vsc_2w_delta ** 2 - vscr_2w_delta ** 2)\n vscr_2w = wye_delta(vscr_2w_delta, sn)\n vsci_2w = wye_delta(vsci_2w_delta, sn)\n vsc_2w = np.sign(vsci_2w) * np.sqrt(vsci_2w ** 2 + vscr_2w ** 2)\n taps = [dict((tv, np.nan) for tv in tap_variables) for _ in range(3)]\n for k in range(3):\n taps[k][\"tp_side\"] = None\n\n trafo3w_tap_at_star_point = ttab.tap_at_star_point\n\n if pd.notnull(ttab.tp_side):\n if ttab.tp_side == \"hv\" or ttab.tp_side == 0:\n tp_trafo = 0\n elif ttab.tp_side == \"mv\":\n tp_trafo = 1\n elif ttab.tp_side == \"lv\":\n tp_trafo = 2\n for tv in tap_variables:\n taps[tp_trafo][tv] = getattr(ttab,tv)\n # consider where the tap is located - at the bus or at star point of the 3W-transformer\n if not trafo3w_tap_at_star_point:\n taps[tp_trafo][\"tp_side\"] = \"hv\" if tp_trafo == 0 else \"lv\"\n else:\n taps[tp_trafo][\"tp_side\"] = \"lv\" if tp_trafo == 0 else \"hv\"\n taps[tp_trafo][\"tp_st_degree\"] += 180\n\n max_load = ttab.max_loading_percent if \"max_loading_percent\" in ttab._fields else 0\n\n trafos2w[i] = {\"hv_bus\": ttab.hv_bus, \"lv_bus\": ttab.ad_bus, \"sn_kva\": ttab.sn_hv_kva,\n \"vn_hv_kv\": ttab.vn_hv_kv, \"vn_lv_kv\": ttab.vn_hv_kv,\n \"vscr_percent\": vscr_2w[0], \"vsc_percent\": vsc_2w[0],\n \"pfe_kw\": ttab.pfe_kw if loss_location == \"hv\" else 0,\n \"i0_percent\": ttab.i0_percent if loss_location == \"hv\" else 0,\n \"tp_side\": 
taps[0][\"tp_side\"],\n \"tp_mid\": taps[0][\"tp_mid\"], \"tp_max\": taps[0][\"tp_max\"],\n \"tp_min\": taps[0][\"tp_min\"], \"tp_pos\": taps[0][\"tp_pos\"],\n \"tp_st_percent\": taps[0][\"tp_st_percent\"],\n \"tp_st_degree\": taps[0][\"tp_st_degree\"], \"tp_phase_shifter\": False,\n \"parallel\": 1, \"df\": 1, \"in_service\": ttab.in_service, \"shift_degree\": 0,\n \"max_loading_percent\": max_load}\n trafos2w[i + nr_trafos] = {\n \"hv_bus\": ttab.ad_bus, \"lv_bus\": ttab.mv_bus, \"sn_kva\": ttab.sn_mv_kva,\n \"vn_hv_kv\": ttab.vn_hv_kv, \"vn_lv_kv\": ttab.vn_mv_kv, \"vscr_percent\": vscr_2w[1],\n \"vsc_percent\": vsc_2w[1], \"pfe_kw\": ttab.pfe_kw if loss_location == \"mv\" else 0,\n \"i0_percent\": ttab.i0_percent * ttab.sn_hv_kva / ttab.sn_mv_kva\n if loss_location == \"mv\" else 0,\n \"tp_side\": taps[1][\"tp_side\"], \"tp_mid\": taps[1][\"tp_mid\"],\n \"tp_max\": taps[1][\"tp_max\"], \"tp_min\": taps[1][\"tp_min\"],\n \"tp_pos\": taps[1][\"tp_pos\"], \"tp_st_percent\": taps[1][\"tp_st_percent\"],\n \"tp_st_degree\": taps[1][\"tp_st_degree\"], \"tp_phase_shifter\": False, \"parallel\": 1,\n \"df\": 1, \"in_service\": ttab.in_service, \"shift_degree\": ttab.shift_mv_degree,\n \"max_loading_percent\": max_load}\n trafos2w[i + 2 * nr_trafos] = {\n \"hv_bus\": ttab.ad_bus, \"lv_bus\": ttab.lv_bus, \"sn_kva\": ttab.sn_lv_kva,\n \"vn_hv_kv\": ttab.vn_hv_kv, \"vn_lv_kv\": ttab.vn_lv_kv, \"vscr_percent\": vscr_2w[2],\n \"vsc_percent\": vsc_2w[2], \"pfe_kw\": ttab.pfe_kw if loss_location == \"lv\" else 0,\n \"i0_percent\": ttab.i0_percent * ttab.sn_hv_kva / ttab.sn_lv_kva\n if loss_location == \"lv\" else 0, \"tp_side\": taps[2][\"tp_side\"],\n \"tp_mid\": taps[2][\"tp_mid\"], \"tp_max\": taps[2][\"tp_max\"],\n \"tp_min\": taps[2][\"tp_min\"], \"tp_pos\": taps[2][\"tp_pos\"],\n \"tp_st_percent\": taps[2][\"tp_st_percent\"], \"tp_st_degree\": taps[2][\"tp_st_degree\"],\n \"tp_phase_shifter\": False, \"parallel\": 1, \"df\": 1, \"in_service\": ttab.in_service,\n \"shift_degree\": ttab.shift_lv_degree, \"max_loading_percent\": max_load}\n i += 1\n\n trafo_df = pd.DataFrame.from_dict(trafos2w, orient=\"index\")\n return trafo_df\n\n\ndef _calc_impedance_parameter(net):\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n t = np.zeros(shape=(len(net[\"impedance\"].index), 7), dtype=np.complex128)\n sn_impedance = net[\"impedance\"][\"sn_kva\"].values\n sn_net = net.sn_kva\n rij = net[\"impedance\"][\"rft_pu\"].values\n xij = net[\"impedance\"][\"xft_pu\"].values\n rji = net[\"impedance\"][\"rtf_pu\"].values\n xji = net[\"impedance\"][\"xtf_pu\"].values\n t[:, 0] = bus_lookup[net[\"impedance\"][\"from_bus\"].values]\n t[:, 1] = bus_lookup[net[\"impedance\"][\"to_bus\"].values]\n t[:, 2] = rij / sn_impedance * sn_net\n t[:, 3] = xij / sn_impedance * sn_net\n t[:, 4] = (rji - rij) / sn_impedance * sn_net\n t[:, 5] = (xji - xij) / sn_impedance * sn_net\n t[:, 6] = net[\"impedance\"][\"in_service\"].values\n return t\n\n\ndef _calc_xward_parameter(net, ppc):\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n baseR = np.square(get_values(ppc[\"bus\"][:, BASE_KV], net[\"xward\"][\"bus\"].values, bus_lookup)) / \\\n net.sn_kva * 1e3\n t = np.zeros(shape=(len(net[\"xward\"].index), 5), dtype=np.complex128)\n xw_is = net[\"_is_elements\"][\"xward\"]\n t[:, 0] = bus_lookup[net[\"xward\"][\"bus\"].values]\n t[:, 1] = bus_lookup[net[\"xward\"][\"ad_bus\"].values]\n t[:, 2] = net[\"xward\"][\"r_ohm\"] / baseR\n t[:, 3] = net[\"xward\"][\"x_ohm\"] / baseR\n t[:, 4] = xw_is\n return t\n\n\ndef 
_gather_branch_switch_info(bus, branch_id, branch_type, net):\n # determine at which end the switch is located\n # 1 = to-bus/lv-bus; 0 = from-bus/hv-bus\n branch_id = int(branch_id)\n if branch_type == \"l\":\n branch_bus = net[\"line\"][\"to_bus\"].at[branch_id]\n is_to_bus = int(branch_bus == bus)\n return is_to_bus, bus, net[\"line\"].index.get_loc(branch_id)\n else:\n branch_bus = net[\"trafo\"][\"lv_bus\"].at[branch_id]\n is_to_bus = int(branch_bus == bus)\n return is_to_bus, bus, net[\"trafo\"].index.get_loc(branch_id)\n\n\ndef _switch_branches(net, ppc):\n from pandapower.shortcircuit.idx_bus import C_MIN, C_MAX\n \"\"\"\n Updates the ppc[\"branch\"] matrix with the changed from or to values\n according of the status of switches\n\n **INPUT**:\n **pd_net** - The pandapower format network\n\n **ppc** - The PYPOWER format network to fill in values\n \"\"\"\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n connectivity_check = net[\"_options\"][\"check_connectivity\"]\n mode = net._options[\"mode\"]\n # get in service elements\n _is_elements = net[\"_is_elements\"]\n bus_is_idx = _is_elements['bus_is_idx']\n lines_is_index = _is_elements[\"line_is_idx\"]\n\n # opened bus line switches\n slidx = (net[\"switch\"][\"closed\"].values == 0) \\\n & (net[\"switch\"][\"et\"].values == \"l\")\n\n # check if there are multiple opened switches at a line (-> set line out of service)\n sw_elem = net['switch'][slidx][\"element\"].values\n m = np.zeros_like(sw_elem, dtype=bool)\n m[np.unique(sw_elem, return_index=True)[1]] = True\n\n # if non unique elements are in sw_elem (= multiple opened bus line switches)\n if np.count_nonzero(m) < len(sw_elem):\n if 'line' not in _is_elements:\n get_is_lines(net)\n lines_is = _is_elements['line']\n lines_to_delete = [idx for idx in sw_elem[~m] if idx in lines_is.index]\n\n from_bus = lines_is.loc[lines_to_delete].from_bus.values\n to_bus = lines_is.loc[lines_to_delete].to_bus.values\n # check if branch is already out of service -> ignore switch\n from_bus = from_bus[~np.isnan(from_bus)].astype(int)\n to_bus = to_bus[~np.isnan(to_bus)].astype(int)\n\n # set branch in ppc out of service if from and to bus are at a line which is in service\n if not connectivity_check and from_bus.size and to_bus.size:\n # get from and to buses of these branches\n ppc_from = bus_lookup[from_bus]\n ppc_to = bus_lookup[to_bus]\n ppc_idx = np.in1d(ppc['branch'][:, 0], ppc_from) \\\n & np.in1d(ppc['branch'][:, 1], ppc_to)\n ppc[\"branch\"][ppc_idx, BR_STATUS] = 0\n\n # drop from in service lines as well\n lines_is = lines_is.drop(lines_to_delete)\n _is_elements[\"line_is_idx\"] = lines_is.index\n\n # opened switches at in service lines\n slidx = slidx \\\n & (np.in1d(net[\"switch\"][\"element\"].values, lines_is_index)) \\\n & (np.in1d(net[\"switch\"][\"bus\"].values, bus_is_idx))\n nlo = np.count_nonzero(slidx)\n\n stidx = (net.switch[\"closed\"].values == 0) & (net.switch[\"et\"].values == \"t\")\n nto = np.count_nonzero(stidx)\n\n if (nlo + nto) > 0:\n n_bus = len(ppc[\"bus\"])\n\n if nlo:\n future_buses = [ppc[\"bus\"]]\n line_switches = net[\"switch\"].loc[slidx]\n\n # determine on which side the switch is located\n mapfunc = partial(_gather_branch_switch_info, branch_type=\"l\", net=net)\n ls_info = list(map(mapfunc,\n line_switches[\"bus\"].values,\n line_switches[\"element\"].values))\n # we now have the following matrix\n # 0: 1 if switch is at to_bus, 0 else\n # 1: bus of the switch\n # 2: position of the line a switch is connected to\n ls_info = np.array(ls_info, 
dtype=int)\n\n # build new buses\n new_ls_buses = np.zeros(shape=(nlo, ppc[\"bus\"].shape[1]), dtype=float)\n new_indices = np.arange(n_bus, n_bus + nlo)\n # the newly created buses\n new_ls_buses[:, :15] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.1, 0.9, 0, 0])\n new_ls_buses[:, 0] = new_indices\n new_ls_buses[:, BASE_KV] = get_values(ppc[\"bus\"][:, BASE_KV], ls_info[:, 1], bus_lookup)\n # set voltage of new buses to voltage on other branch end\n to_buses = ppc[\"branch\"][ls_info[ls_info[:, 0].astype(bool), 2], 1].real.astype(int)\n from_buses = ppc[\"branch\"][ls_info[np.logical_not(ls_info[:, 0]), 2], 0].real \\\n .astype(int)\n\n if len(to_buses):\n ix = ls_info[:, 0] == 1\n new_ls_buses[ix, VM] = ppc[\"bus\"][to_buses, VM]\n new_ls_buses[ix, VA] = ppc[\"bus\"][to_buses, VA]\n if mode == \"sc\":\n new_ls_buses[ix, C_MAX] = ppc[\"bus\"][to_buses, C_MAX]\n new_ls_buses[ix, C_MIN] = ppc[\"bus\"][to_buses, C_MIN]\n\n if len(from_buses):\n ix = ls_info[:, 0] == 0\n new_ls_buses[ix, VM] = ppc[\"bus\"][from_buses, VM]\n new_ls_buses[ix, VA] = ppc[\"bus\"][from_buses, VA]\n if mode == \"sc\":\n new_ls_buses[ix, C_MAX] = ppc[\"bus\"][from_buses, C_MAX]\n new_ls_buses[ix, C_MIN] = ppc[\"bus\"][from_buses, C_MIN]\n\n future_buses.append(new_ls_buses)\n # re-route the end of lines to a new bus\n ppc[\"branch\"][ls_info[ls_info[:, 0].astype(bool), 2], 1] = \\\n new_indices[ls_info[:, 0].astype(bool)]\n ppc[\"branch\"][ls_info[np.logical_not(ls_info[:, 0]), 2], 0] = \\\n new_indices[np.logical_not(ls_info[:, 0])]\n\n ppc[\"bus\"] = np.vstack(future_buses)\n\n if nto:\n future_buses = [ppc[\"bus\"]]\n trafo_switches = net[\"switch\"].loc[stidx]\n\n # determine on which side the switch is located\n mapfunc = partial(_gather_branch_switch_info, branch_type=\"t\", net=net)\n ts_info = list(map(mapfunc,\n trafo_switches[\"bus\"].values,\n trafo_switches[\"element\"].values))\n # we now have the following matrix\n # 0: 1 if switch is at lv_bus, 0 else\n # 1: bus of the switch\n # 2: position of the trafo a switch is connected to\n ts_info = np.array(ts_info, dtype=int)\n\n # build new buses\n new_ts_buses = np.zeros(shape=(nto, ppc[\"bus\"].shape[1]), dtype=float)\n new_indices = np.arange(n_bus + nlo, n_bus + nlo + nto)\n new_ts_buses[:, :15] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.1, 0.9, 0, 0])\n new_ts_buses[:, 0] = new_indices\n new_ts_buses[:, BASE_KV] = get_values(ppc[\"bus\"][:, BASE_KV], ts_info[:, 1], bus_lookup)\n # set voltage of new buses to voltage on other branch end\n to_buses = ppc[\"branch\"][ts_info[ts_info[:, 0].astype(bool), 2], 1].real.astype(int)\n from_buses = ppc[\"branch\"][ts_info[np.logical_not(ts_info[:, 0]), 2], 0].real \\\n .astype(int)\n\n # set newly created buses to voltage on other side of\n if len(to_buses):\n ix = ts_info[:, 0] == 1\n taps = ppc[\"branch\"][ts_info[ts_info[:, 0].astype(bool), 2], VA].real\n shift = ppc[\"branch\"][ts_info[ts_info[:, 0].astype(bool), 2], BASE_KV].real\n new_ts_buses[ix, VM] = ppc[\"bus\"][to_buses, VM] * taps\n new_ts_buses[ix, VA] = ppc[\"bus\"][to_buses, VA] + shift\n if mode == \"sc\":\n new_ts_buses[ix, C_MAX] = ppc[\"bus\"][to_buses, C_MAX]\n new_ts_buses[ix, C_MIN] = 0.95 # ppc[\"bus\"][to_buses, C_MIN]\n if len(from_buses):\n ix = ts_info[:, 0] == 0\n taps = ppc[\"branch\"][ts_info[np.logical_not(ts_info[:, 0]), 2], VA].real\n shift = ppc[\"branch\"][ts_info[np.logical_not(ts_info[:, 0]), 2], BASE_KV].real\n new_ts_buses[ix, VM] = ppc[\"bus\"][from_buses, VM] * taps\n new_ts_buses[ix, VA] = ppc[\"bus\"][from_buses, 
VA] + shift\n if mode == \"sc\":\n new_ts_buses[ix, C_MAX] = ppc[\"bus\"][from_buses, C_MAX]\n new_ts_buses[ix, C_MIN] = ppc[\"bus\"][from_buses, C_MIN]\n future_buses.append(new_ts_buses)\n\n # re-route the hv/lv-side of the trafo to a new bus\n # (trafo entries follow line entries)\n at_lv_bus = ts_info[:, 0].astype(bool)\n at_hv_bus = ~at_lv_bus\n ppc[\"branch\"][len(net.line) + ts_info[at_lv_bus, 2], 1] = \\\n new_indices[at_lv_bus]\n ppc[\"branch\"][len(net.line) + ts_info[at_hv_bus, 2], 0] = \\\n new_indices[at_hv_bus]\n\n ppc[\"bus\"] = np.vstack(future_buses)\n\n\ndef _branches_with_oos_buses(net, ppc):\n \"\"\"\n Updates the ppc[\"branch\"] matrix with the changed from or to values\n if the branch is connected to an out of service bus\n\n Adds auxiliary buses if branch is connected to an out of service bus\n Sets branch out of service if connected to two out of service buses\n\n **INPUT**:\n **n** - The pandapower format network\n\n **ppc** - The PYPOWER format network to fill in values\n **bus_is** - The in service buses\n \"\"\"\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n # get in service elements\n _is_elements = net[\"_is_elements\"]\n bus_is_idx = _is_elements['bus_is_idx']\n line_is_idx = _is_elements['line_is_idx']\n\n n_oos_buses = len(net['bus']) - len(bus_is_idx)\n\n # only filter lines at oos buses if oos buses exists\n if n_oos_buses > 0:\n n_bus = len(ppc[\"bus\"])\n future_buses = [ppc[\"bus\"]]\n # out of service buses\n bus_oos = np.setdiff1d(net['bus'].index.values, bus_is_idx)\n # from buses of line\n line_buses = net[\"line\"][[\"from_bus\", \"to_bus\"]].loc[line_is_idx].values\n f_bus = line_buses[:, 0]\n t_bus = line_buses[:, 1]\n\n # determine on which side of the line the oos bus is located\n mask_from = np.in1d(f_bus, bus_oos)\n mask_to = np.in1d(t_bus, bus_oos)\n\n mask_and = mask_to & mask_from\n if np.any(mask_and):\n mask_from[mask_and] = False\n mask_to[mask_and] = False\n\n # get lines that are connected to oos bus at exactly one side\n # buses that are connected to two oos buses will be removed by ext2int\n mask_or = mask_to | mask_from\n # check whether buses are connected to line\n oos_buses_at_lines = np.r_[f_bus[mask_from], t_bus[mask_to]]\n n_oos_buses_at_lines = len(oos_buses_at_lines)\n\n # only if oos_buses are at lines (they could be isolated as well)\n if n_oos_buses_at_lines > 0:\n ls_info = np.zeros((n_oos_buses_at_lines, 3), dtype=int)\n ls_info[:, 0] = mask_to[mask_or] & ~mask_from[mask_or]\n ls_info[:, 1] = oos_buses_at_lines\n ls_info[:, 2] = np.nonzero(np.in1d(net['line'].index, line_is_idx[mask_or]))[0]\n\n # ls_info = list(map(mapfunc,\n # line_switches[\"bus\"].values,\n # line_switches[\"element\"].values))\n # we now have the following matrix\n # 0: 1 if switch is at to_bus, 0 else\n # 1: bus of the switch\n # 2: position of the line a switch is connected to\n # ls_info = np.array(ls_info, dtype=int)\n\n # build new buses\n new_ls_buses = np.zeros(shape=(n_oos_buses_at_lines, ppc[\"bus\"].shape[1]), dtype=float)\n new_indices = np.arange(n_bus, n_bus + n_oos_buses_at_lines)\n # the newly created buses\n new_ls_buses[:, :15] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.1, 0.9, 0, 0])\n new_ls_buses[:, 0] = new_indices\n new_ls_buses[:, BASE_KV] = get_values(ppc[\"bus\"][:, BASE_KV], ls_info[:, 1], bus_lookup)\n\n future_buses.append(new_ls_buses)\n\n # re-route the end of lines to a new bus\n ppc[\"branch\"][ls_info[ls_info[:, 0].astype(bool), 2], 1] = \\\n new_indices[ls_info[:, 0].astype(bool)]\n 
ppc[\"branch\"][ls_info[np.logical_not(ls_info[:, 0]), 2], 0] = \\\n new_indices[np.logical_not(ls_info[:, 0])]\n\n ppc[\"bus\"] = np.vstack(future_buses)\n\n\ndef _update_trafo_trafo3w_ppc(net, ppc):\n \"\"\"\n Updates the trafo and trafo3w values when reusing the ppc between two powerflows\n\n :param net: pandapower net\n :param ppc: pypower format\n :return: ppc with updates values\n \"\"\"\n line_end = len(net[\"line\"])\n trafo_end = line_end + len(net[\"trafo\"])\n trafo3w_end = trafo_end + len(net[\"trafo3w\"]) * 3\n\n if trafo_end > line_end:\n ppc[\"branch\"][line_end:trafo_end,\n [F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A]] = \\\n _calc_trafo_parameter(net, ppc)\n if trafo3w_end > trafo_end:\n ppc[\"branch\"][trafo_end:trafo3w_end, [F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS]] = \\\n _calc_trafo3w_parameter(net, ppc)\n\n\ndef _calc_switch_parameter(net, ppc):\n \"\"\"\n calculates the line parameter in per unit.\n\n **INPUT**:\n **net** -The pandapower format network\n\n **RETURN**:\n **t** - Temporary line parameter. Which is a complex128\n Nunmpy array. with the following order:\n 0:bus_a; 1:bus_b; 2:r_pu; 3:x_pu; 4:b_pu\n \"\"\"\n r_switch = net[\"_options\"][\"r_switch\"]\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n switch = net.switch[net._closed_bb_switches]\n fb = bus_lookup[switch[\"bus\"].values]\n tb = bus_lookup[switch[\"element\"].values]\n baseR = np.square(ppc[\"bus\"][fb, BASE_KV]) / net.sn_kva * 1e3\n t = np.zeros(shape=(len(switch), 3), dtype=np.complex128)\n\n t[:, 0] = fb\n t[:, 1] = tb\n\n t[:, 2] = r_switch / baseR\n return t\n\n\ndef _end_temperature_correction_factor(net):\n if \"endtemp_degree\" not in net.line:\n raise UserWarning(\"Specify end temperature for lines in net.endtemp_degree\")\n return (1 + .004 * (net.line.endtemp_degree.values.astype(float) - 20)) # formula from standard\n\n\ndef _transformer_correction_factor(vsc, vscr, sn, cmax):\n sn = sn / 1000.\n zt = vsc / 100 / sn\n rt = vscr / 100 / sn\n xt = np.sqrt(zt ** 2 - rt ** 2)\n kt = 0.95 * cmax / (1 + .6 * xt * sn)\n return kt\n\n\ndef get_is_lines(net):\n _is_elements = net[\"_is_elements\"]\n _is_elements[\"line\"] = net[\"line\"][net[\"line\"][\"in_service\"].values.astype(bool)]\n", "path": "pandapower/build_branch.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.\n\nimport copy\nimport math\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandapower.auxiliary import get_values\nfrom pandapower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A, \\\n BR_R_ASYM, BR_X_ASYM, branch_cols\nfrom pandapower.idx_bus import BASE_KV, VM, VA\n\n\ndef _build_branch_ppc(net, ppc):\n \"\"\"\n Takes the empty ppc network and fills it with the branch values. The branch\n datatype will be np.complex 128 afterwards.\n\n .. note:: The order of branches in the ppc is:\n 1. Lines\n 2. Transformers\n 3. 3W Transformers (each 3W Transformer takes up three branches)\n 4. Impedances\n 5. Internal branch for extended ward\n\n **INPUT**:\n **net** -The pandapower format network\n\n **ppc** - The PYPOWER format network to fill in values\n\n \"\"\"\n length = _initialize_branch_lookup(net)\n lookup = net._pd2ppc_lookups[\"branch\"]\n mode = net._options[\"mode\"]\n ppc[\"branch\"] = np.zeros(shape=(length, branch_cols), dtype=np.complex128)\n if mode == \"sc\":\n from pandapower.shortcircuit.idx_brch import branch_cols_sc\n branch_sc = np.empty(shape=(length, branch_cols_sc), dtype=float)\n branch_sc.fill(np.nan)\n ppc[\"branch\"] = np.hstack((ppc[\"branch\"], branch_sc))\n ppc[\"branch\"][:, :13] = np.array([0, 0, 0, 0, 0, 250, 250, 250, 1, 0, 1, -360, 360])\n if \"line\" in lookup:\n f, t = lookup[\"line\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_B,\n BR_STATUS, RATE_A]] = _calc_line_parameter(net, ppc)\n if \"trafo\" in lookup:\n f, t = lookup[\"trafo\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS,\n RATE_A]] = _calc_trafo_parameter(net, ppc)\n if \"trafo3w\" in lookup:\n f, t = lookup[\"trafo3w\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A]] = \\\n _calc_trafo3w_parameter(net, ppc)\n if \"impedance\" in lookup:\n f, t = lookup[\"impedance\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_R_ASYM, BR_X_ASYM, BR_STATUS]] = \\\n _calc_impedance_parameter(net)\n if \"xward\" in lookup:\n f, t = lookup[\"xward\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R, BR_X, BR_STATUS]] = _calc_xward_parameter(net, ppc)\n\n if \"switch\" in lookup:\n f, t = lookup[\"switch\"]\n ppc[\"branch\"][f:t, [F_BUS, T_BUS, BR_R]] = _calc_switch_parameter(net, ppc)\n\n\ndef _initialize_branch_lookup(net):\n r_switch = net[\"_options\"][\"r_switch\"]\n start = 0\n end = 0\n net._pd2ppc_lookups[\"branch\"] = {}\n for element in [\"line\", \"trafo\", \"trafo3w\", \"impedance\", \"xward\"]:\n if len(net[element]) > 0:\n if element == \"trafo3w\":\n end = start + len(net[element]) * 3\n else:\n end = start + len(net[element])\n net._pd2ppc_lookups[\"branch\"][element] = (start, end)\n start = end\n if r_switch > 0 and len(net._closed_bb_switches) > 0:\n end = start + net._closed_bb_switches.sum()\n net._pd2ppc_lookups[\"branch\"][\"switch\"] = (start, end)\n return end\n\n\ndef _calc_trafo3w_parameter(net, ppc):\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n trafo_df = _trafo_df_from_trafo3w(net)\n net._equiv_trafo3w = trafo_df\n\n temp_para = np.zeros(shape=(len(trafo_df), 
9), dtype=np.complex128)\n temp_para[:, 0] = bus_lookup[(trafo_df[\"hv_bus\"].values).astype(int)]\n temp_para[:, 1] = bus_lookup[(trafo_df[\"lv_bus\"].values).astype(int)]\n temp_para[:, 2:7] = _calc_branch_values_from_trafo_df(net, ppc, trafo_df)\n temp_para[:, 7] = trafo_df[\"in_service\"].values\n if copy_constraints_to_ppc:\n max_load = trafo_df.max_loading_percent if \"max_loading_percent\" in trafo_df else 0\n temp_para[:, 8] = max_load / 100. * trafo_df.sn_kva / 1000.\n return temp_para\n\n\ndef _calc_line_parameter(net, ppc):\n \"\"\"\n calculates the line parameter in per unit.\n\n **INPUT**:\n **net** -The pandapower format network\n\n **RETURN**:\n **t** - Temporary line parameter. Which is a complex128\n Nunmpy array. with the following order:\n 0:bus_a; 1:bus_b; 2:r_pu; 3:x_pu; 4:b_pu\n \"\"\"\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\n mode = net[\"_options\"][\"mode\"]\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n line = net[\"line\"]\n fb = bus_lookup[line[\"from_bus\"].values]\n tb = bus_lookup[line[\"to_bus\"].values]\n length = line[\"length_km\"].values\n parallel = line[\"parallel\"].values\n baseR = np.square(ppc[\"bus\"][fb, BASE_KV]) / net.sn_kva * 1e3\n t = np.zeros(shape=(len(line.index), 7), dtype=np.complex128)\n\n t[:, 0] = fb\n t[:, 1] = tb\n\n t[:, 2] = line[\"r_ohm_per_km\"].values * length / baseR / parallel\n t[:, 3] = line[\"x_ohm_per_km\"].values * length / baseR / parallel\n if mode == \"sc\":\n if net[\"_options\"][\"case\"] == \"min\":\n t[:, 2] *= _end_temperature_correction_factor(net)\n else:\n b = (2 * net.f_hz * math.pi * line[\"c_nf_per_km\"].values * 1e-9 * baseR *\n length * parallel)\n g = line[\"g_us_per_km\"].values * 1e-6 * baseR * length * parallel\n t[:, 4] = b - g * 1j\n t[:, 5] = line[\"in_service\"].values\n if copy_constraints_to_ppc:\n max_load = line.max_loading_percent.values if \"max_loading_percent\" in line else 0\n vr = net.bus.vn_kv.loc[line[\"from_bus\"].values].values * np.sqrt(3)\n t[:, 6] = max_load / 100. * line.max_i_ka.values * line.df.values * parallel * vr\n return t\n\n\ndef _calc_trafo_parameter(net, ppc):\n '''\n Calculates the transformer parameter in per unit.\n\n **INPUT**:\n **net** - The pandapower format network\n\n **RETURN**:\n **temp_para** -\n Temporary transformer parameter. Which is a np.complex128\n Numpy array. with the following order:\n 0:hv_bus; 1:lv_bus; 2:r_pu; 3:x_pu; 4:b_pu; 5:tab, 6:shift\n '''\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\n\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n temp_para = np.zeros(shape=(len(net[\"trafo\"].index), 9), dtype=np.complex128)\n trafo = net[\"trafo\"]\n parallel = trafo[\"parallel\"].values\n temp_para[:, 0] = bus_lookup[trafo[\"hv_bus\"].values]\n temp_para[:, 1] = bus_lookup[trafo[\"lv_bus\"].values]\n temp_para[:, 2:7] = _calc_branch_values_from_trafo_df(net, ppc)\n temp_para[:, 7] = trafo[\"in_service\"].values\n if any(trafo.df.values <= 0):\n raise UserWarning(\"Rating factor df must be positive. Transformers with false \"\n \"rating factors: %s\" % trafo.query('df<=0').index.tolist())\n if copy_constraints_to_ppc:\n max_load = trafo.max_loading_percent.values if \"max_loading_percent\" in trafo else 0\n temp_para[:, 8] = max_load / 100. * trafo.sn_kva.values / 1000. 
* trafo.df.values * parallel\n return temp_para\n\n\ndef _calc_branch_values_from_trafo_df(net, ppc, trafo_df=None):\n \"\"\"\n Calculates the MAT/PYPOWER-branch-attributes from the pandapower trafo dataframe.\n\n PYPOWER and MATPOWER uses the PI-model to model transformers.\n This function calculates the resistance r, reactance x, complex susceptance c and the tap ratio\n according to the given parameters.\n\n .. warning:: This function returns the subsceptance b as a complex number\n **(-img + -re*i)**. MAT/PYPOWER is only intended to calculate the\n imaginary part of the subceptance. However, internally c is\n multiplied by i. By using subsceptance in this way, it is possible\n to consider the ferromagnetic loss of the coil. Which would\n otherwise be neglected.\n\n\n .. warning:: Tab switches effect calculation as following:\n On **high-voltage** side(=1) -> only **tab** gets adapted.\n On **low-voltage** side(=2) -> **tab, x, r** get adapted.\n This is consistent with Sincal.\n The Sincal method in this case is questionable.\n\n\n **INPUT**:\n **pd_trafo** - The pandapower format Transformer Dataframe.\n The Transformer modell will only readfrom pd_net\n\n **RETURN**:\n **temp_para** - Temporary transformer parameter. Which is a complex128\n Nunmpy array. with the following order:\n 0:r_pu; 1:x_pu; 2:b_pu; 3:tab;\n\n \"\"\"\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n if trafo_df is None:\n trafo_df = net[\"trafo\"]\n parallel = trafo_df[\"parallel\"].values\n vn_lv = get_values(ppc[\"bus\"][:, BASE_KV], trafo_df[\"lv_bus\"].values, bus_lookup)\n ### Construct np.array to parse results in ###\n # 0:r_pu; 1:x_pu; 2:b_pu; 3:tab;\n temp_para = np.zeros(shape=(len(trafo_df), 5), dtype=np.complex128)\n vn_trafo_hv, vn_trafo_lv, shift = _calc_tap_from_dataframe(net, trafo_df)\n ratio = _calc_nominal_ratio_from_dataframe(ppc, trafo_df, vn_trafo_hv, vn_trafo_lv,\n bus_lookup)\n r, x, y = _calc_r_x_y_from_dataframe(net, trafo_df, vn_trafo_lv, vn_lv, net.sn_kva)\n temp_para[:, 0] = r / parallel\n temp_para[:, 1] = x / parallel\n temp_para[:, 2] = y * parallel\n temp_para[:, 3] = ratio\n temp_para[:, 4] = shift\n return temp_para\n\n\ndef _calc_r_x_y_from_dataframe(net, trafo_df, vn_trafo_lv, vn_lv, sn_kva):\n mode = net[\"_options\"][\"mode\"]\n trafo_model = net[\"_options\"][\"trafo_model\"]\n\n r, x = _calc_r_x_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_kva)\n if mode == \"sc\":\n y = 0\n if trafo_df.equals(net.trafo):\n from pandapower.shortcircuit.idx_bus import C_MAX\n bus_lookup = net._pd2ppc_lookups[\"bus\"]\n cmax = net._ppc[\"bus\"][bus_lookup[net.trafo.lv_bus.values], C_MAX]\n kt = _transformer_correction_factor(trafo_df.vsc_percent, trafo_df.vscr_percent,\n trafo_df.sn_kva, cmax)\n r *= kt\n x *= kt\n else:\n y = _calc_y_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_kva)\n if trafo_model == \"pi\":\n return r, x, y\n elif trafo_model == \"t\":\n return _wye_delta(r, x, y)\n else:\n raise ValueError(\"Unkonwn Transformer Model %s - valid values ar 'pi' or 't'\" % trafo_model)\n\n\ndef _wye_delta(r, x, y):\n \"\"\"\n 20.05.2016 added by Lothar Löwer\n\n Calculate transformer Pi-Data based on T-Data\n\n \"\"\"\n tidx = np.where(y != 0)\n za_star = (r[tidx] + x[tidx] * 1j) / 2\n zc_star = -1j / y[tidx]\n zSum_triangle = za_star * za_star + 2 * za_star * zc_star\n zab_triangle = zSum_triangle / zc_star\n zbc_triangle = zSum_triangle / za_star\n r[tidx] = zab_triangle.real\n x[tidx] = zab_triangle.imag\n y[tidx] = -2j / zbc_triangle\n return r, x, y\n\n\ndef 
_calc_y_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_kva):\n \"\"\"\n Calculate the subsceptance y from the transformer dataframe.\n\n INPUT:\n\n **trafo** (Dataframe) - The dataframe in net.trafo\n which contains transformer calculation values.\n\n OUTPUT:\n **subsceptance** (1d array, np.complex128) - The subsceptance in pu in\n the form (-b_img, -b_real)\n \"\"\"\n baseR = np.square(vn_lv) / sn_kva * 1e3\n\n ### Calculate subsceptance ###\n vnl_squared = trafo_df[\"vn_lv_kv\"].values ** 2\n b_real = trafo_df[\"pfe_kw\"].values / (1000. * vnl_squared) * baseR\n i0 = trafo_df[\"i0_percent\"].values\n pfe = trafo_df[\"pfe_kw\"].values\n sn = trafo_df[\"sn_kva\"].values\n b_img = (i0 / 100. * sn / 1000.) ** 2 - (pfe / 1000.) ** 2\n\n b_img[b_img < 0] = 0\n b_img = np.sqrt(b_img) * baseR / vnl_squared\n y = - b_real * 1j - b_img * np.sign(i0)\n if \"lv\" in trafo_df[\"tp_side\"].values:\n return y / np.square(vn_trafo_lv / trafo_df[\"vn_lv_kv\"].values)\n else:\n return y\n\n\ndef _calc_tap_from_dataframe(net, trafo_df):\n \"\"\"\n Adjust the nominal voltage vnh and vnl to the active tab position \"tp_pos\".\n If \"side\" is 1 (high-voltage side) the high voltage vnh is adjusted.\n If \"side\" is 2 (low-voltage side) the low voltage vnl is adjusted\n\n INPUT:\n **net** - The pandapower format network\n\n **trafo** (Dataframe) - The dataframe in pd_net[\"structure\"][\"trafo\"]\n which contains transformer calculation values.\n\n OUTPUT:\n **vn_hv_kv** (1d array, float) - The adusted high voltages\n\n **vn_lv_kv** (1d array, float) - The adjusted low voltages\n\n **trafo_shift** (1d array, float) - phase shift angle\n\n \"\"\"\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\n mode = net[\"_options\"][\"mode\"]\n trafo_shift = trafo_df[\"shift_degree\"].values.astype(float) if calculate_voltage_angles else \\\n np.zeros(len(trafo_df))\n vnh = copy.copy(trafo_df[\"vn_hv_kv\"].values.astype(float))\n vnl = copy.copy(trafo_df[\"vn_lv_kv\"].values.astype(float))\n if mode == \"sc\":\n return vnh, vnl, trafo_shift\n\n tp_diff = trafo_df[\"tp_pos\"].values - trafo_df[\"tp_mid\"].values\n\n cos = lambda x: np.cos(np.deg2rad(x))\n sin = lambda x: np.sin(np.deg2rad(x))\n arctan = lambda x: np.rad2deg(np.arctan(x))\n\n for side, vn, direction in [(\"hv\", vnh, 1), (\"lv\", vnl, -1)]:\n phase_shifters = trafo_df[\"tp_phase_shifter\"].values & (trafo_df[\"tp_side\"].values == side)\n tap_complex = (np.isfinite(trafo_df[\"tp_st_percent\"].values) &\n np.isfinite(trafo_df[\"tp_pos\"].values) &\n (trafo_df[\"tp_side\"].values == side) &\n ~phase_shifters)\n if np.any(tap_complex):\n tp_steps = trafo_df[\"tp_st_percent\"].values[tap_complex] * tp_diff[tap_complex] / 100\n tp_angles = np.nan_to_num(trafo_df[\"tp_st_degree\"].values[tap_complex])\n u1 = vn[tap_complex]\n du = u1 * np.nan_to_num(tp_steps)\n vn[tap_complex] = np.sqrt((u1 + du * cos(tp_angles)) ** 2 + (du * sin(tp_angles)) ** 2)\n trafo_shift[tap_complex] += (arctan(direction * du * sin(tp_angles) /\n (u1 + du * cos(tp_angles))))\n if np.any(phase_shifters):\n trafo_shift[phase_shifters] += (direction * tp_diff[phase_shifters] *\n trafo_df[\"tp_st_degree\"].values[phase_shifters])\n return vnh, vnl, trafo_shift\n\n\ndef _calc_r_x_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_kva):\n \"\"\"\n Calculates (Vectorized) the resitance and reactance according to the\n transformer values\n\n \"\"\"\n tap_lv = np.square(vn_trafo_lv / vn_lv) * sn_kva # adjust for low voltage side voltage converter\n sn_trafo_kva = 
trafo_df.sn_kva.values\n z_sc = trafo_df[\"vsc_percent\"].values / 100. / sn_trafo_kva * tap_lv\n r_sc = trafo_df[\"vscr_percent\"].values / 100. / sn_trafo_kva * tap_lv\n x_sc = np.sign(z_sc) * np.sqrt(z_sc ** 2 - r_sc ** 2)\n return r_sc, x_sc\n\n\ndef _calc_nominal_ratio_from_dataframe(ppc, trafo_df, vn_hv_kv, vn_lv_kv, bus_lookup):\n \"\"\"\n Calculates (Vectorized) the off nominal tap ratio::\n\n (vn_hv_kv / vn_lv_kv) / (ub1_in_kv / ub2_in_kv)\n\n INPUT:\n **net** (Dataframe) - The net for which to calc the tap ratio.\n\n **vn_hv_kv** (1d array, float) - The adjusted nominal high voltages\n\n **vn_lv_kv** (1d array, float) - The adjusted nominal low voltages\n\n OUTPUT:\n **tab** (1d array, float) - The off-nominal tap ratio\n \"\"\"\n # Calculating tab (trasformer off nominal turns ratio)\n tap_rat = vn_hv_kv / vn_lv_kv\n nom_rat = get_values(ppc[\"bus\"][:, BASE_KV], trafo_df[\"hv_bus\"].values, bus_lookup) / \\\n get_values(ppc[\"bus\"][:, BASE_KV], trafo_df[\"lv_bus\"].values, bus_lookup)\n return tap_rat / nom_rat\n\n\ndef z_br_to_bus(z, s):\n return s[0] * np.array([z[0] / min(s[0], s[1]), z[1] /\n min(s[1], s[2]), z[2] / min(s[0], s[2])])\n\n\ndef wye_delta(zbr_n, s):\n return .5 * s / s[0] * np.array([(zbr_n[0] + zbr_n[2] - zbr_n[1]),\n (zbr_n[1] + zbr_n[0] - zbr_n[2]),\n (zbr_n[2] + zbr_n[1] - zbr_n[0])])\n\n\ndef _trafo_df_from_trafo3w(net):\n mode = net._options[\"mode\"]\n loss_location = net._options[\"trafo3w_losses\"].lower()\n trafos2w = {}\n nr_trafos = len(net[\"trafo3w\"])\n tap_variables = (\"tp_pos\", \"tp_mid\", \"tp_max\", \"tp_min\", \"tp_st_percent\", \"tp_st_degree\")\n i = 0\n for ttab in net[\"trafo3w\"].itertuples():\n vsc = np.array([ttab.vsc_hv_percent, ttab.vsc_mv_percent, ttab.vsc_lv_percent], dtype=float)\n vscr = np.array([ttab.vscr_hv_percent, ttab.vscr_mv_percent, ttab.vscr_lv_percent], dtype=float)\n sn = np.array([ttab.sn_hv_kva, ttab.sn_mv_kva, ttab.sn_lv_kva])\n vsc_2w_delta = z_br_to_bus(vsc, sn)\n vscr_2w_delta = z_br_to_bus(vscr, sn)\n if mode == \"sc\":\n kt = _transformer_correction_factor(vsc, vscr, sn, 1.1)\n vsc_2w_delta *= kt\n vscr_2w_delta *= kt\n vsci_2w_delta = np.sqrt(vsc_2w_delta ** 2 - vscr_2w_delta ** 2)\n vscr_2w = wye_delta(vscr_2w_delta, sn)\n vsci_2w = wye_delta(vsci_2w_delta, sn)\n vsc_2w = np.sign(vsci_2w) * np.sqrt(vsci_2w ** 2 + vscr_2w ** 2)\n taps = [dict((tv, np.nan) for tv in tap_variables) for _ in range(3)]\n for k in range(3):\n taps[k][\"tp_side\"] = None\n\n trafo3w_tap_at_star_point = ttab.tap_at_star_point\n\n if pd.notnull(ttab.tp_side):\n if ttab.tp_side == \"hv\" or ttab.tp_side == 0:\n tp_trafo = 0\n elif ttab.tp_side == \"mv\":\n tp_trafo = 1\n elif ttab.tp_side == \"lv\":\n tp_trafo = 2\n for tv in tap_variables:\n taps[tp_trafo][tv] = getattr(ttab,tv)\n # consider where the tap is located - at the bus or at star point of the 3W-transformer\n if not trafo3w_tap_at_star_point:\n taps[tp_trafo][\"tp_side\"] = \"hv\" if tp_trafo == 0 else \"lv\"\n else:\n taps[tp_trafo][\"tp_side\"] = \"lv\" if tp_trafo == 0 else \"hv\"\n taps[tp_trafo][\"tp_st_degree\"] += 180\n\n max_load = ttab.max_loading_percent if \"max_loading_percent\" in ttab._fields else 0\n\n trafos2w[i] = {\"hv_bus\": ttab.hv_bus, \"lv_bus\": ttab.ad_bus, \"sn_kva\": ttab.sn_hv_kva,\n \"vn_hv_kv\": ttab.vn_hv_kv, \"vn_lv_kv\": ttab.vn_hv_kv,\n \"vscr_percent\": vscr_2w[0], \"vsc_percent\": vsc_2w[0],\n \"pfe_kw\": ttab.pfe_kw if loss_location == \"hv\" else 0,\n \"i0_percent\": ttab.i0_percent if loss_location == \"hv\" else 0,\n \"tp_side\": 
taps[0][\"tp_side\"],\n \"tp_mid\": taps[0][\"tp_mid\"], \"tp_max\": taps[0][\"tp_max\"],\n \"tp_min\": taps[0][\"tp_min\"], \"tp_pos\": taps[0][\"tp_pos\"],\n \"tp_st_percent\": taps[0][\"tp_st_percent\"],\n \"tp_st_degree\": taps[0][\"tp_st_degree\"], \"tp_phase_shifter\": False,\n \"parallel\": 1, \"df\": 1, \"in_service\": ttab.in_service, \"shift_degree\": 0,\n \"max_loading_percent\": max_load}\n trafos2w[i + nr_trafos] = {\n \"hv_bus\": ttab.ad_bus, \"lv_bus\": ttab.mv_bus, \"sn_kva\": ttab.sn_mv_kva,\n \"vn_hv_kv\": ttab.vn_hv_kv, \"vn_lv_kv\": ttab.vn_mv_kv, \"vscr_percent\": vscr_2w[1],\n \"vsc_percent\": vsc_2w[1], \"pfe_kw\": ttab.pfe_kw if loss_location == \"mv\" else 0,\n \"i0_percent\": ttab.i0_percent * ttab.sn_hv_kva / ttab.sn_mv_kva\n if loss_location == \"mv\" else 0,\n \"tp_side\": taps[1][\"tp_side\"], \"tp_mid\": taps[1][\"tp_mid\"],\n \"tp_max\": taps[1][\"tp_max\"], \"tp_min\": taps[1][\"tp_min\"],\n \"tp_pos\": taps[1][\"tp_pos\"], \"tp_st_percent\": taps[1][\"tp_st_percent\"],\n \"tp_st_degree\": taps[1][\"tp_st_degree\"], \"tp_phase_shifter\": False, \"parallel\": 1,\n \"df\": 1, \"in_service\": ttab.in_service, \"shift_degree\": ttab.shift_mv_degree,\n \"max_loading_percent\": max_load}\n trafos2w[i + 2 * nr_trafos] = {\n \"hv_bus\": ttab.ad_bus, \"lv_bus\": ttab.lv_bus, \"sn_kva\": ttab.sn_lv_kva,\n \"vn_hv_kv\": ttab.vn_hv_kv, \"vn_lv_kv\": ttab.vn_lv_kv, \"vscr_percent\": vscr_2w[2],\n \"vsc_percent\": vsc_2w[2], \"pfe_kw\": ttab.pfe_kw if loss_location == \"lv\" else 0,\n \"i0_percent\": ttab.i0_percent * ttab.sn_hv_kva / ttab.sn_lv_kva\n if loss_location == \"lv\" else 0, \"tp_side\": taps[2][\"tp_side\"],\n \"tp_mid\": taps[2][\"tp_mid\"], \"tp_max\": taps[2][\"tp_max\"],\n \"tp_min\": taps[2][\"tp_min\"], \"tp_pos\": taps[2][\"tp_pos\"],\n \"tp_st_percent\": taps[2][\"tp_st_percent\"], \"tp_st_degree\": taps[2][\"tp_st_degree\"],\n \"tp_phase_shifter\": False, \"parallel\": 1, \"df\": 1, \"in_service\": ttab.in_service,\n \"shift_degree\": ttab.shift_lv_degree, \"max_loading_percent\": max_load}\n i += 1\n\n trafo_df = pd.DataFrame.from_dict(trafos2w, orient=\"index\")\n if any(trafo_df.vsc_percent==0):\n raise UserWarning(\"Equivalent transformer with zero impedance!\")\n return trafo_df\n\n\ndef _calc_impedance_parameter(net):\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n t = np.zeros(shape=(len(net[\"impedance\"].index), 7), dtype=np.complex128)\n sn_impedance = net[\"impedance\"][\"sn_kva\"].values\n sn_net = net.sn_kva\n rij = net[\"impedance\"][\"rft_pu\"].values\n xij = net[\"impedance\"][\"xft_pu\"].values\n rji = net[\"impedance\"][\"rtf_pu\"].values\n xji = net[\"impedance\"][\"xtf_pu\"].values\n t[:, 0] = bus_lookup[net[\"impedance\"][\"from_bus\"].values]\n t[:, 1] = bus_lookup[net[\"impedance\"][\"to_bus\"].values]\n t[:, 2] = rij / sn_impedance * sn_net\n t[:, 3] = xij / sn_impedance * sn_net\n t[:, 4] = (rji - rij) / sn_impedance * sn_net\n t[:, 5] = (xji - xij) / sn_impedance * sn_net\n t[:, 6] = net[\"impedance\"][\"in_service\"].values\n return t\n\n\ndef _calc_xward_parameter(net, ppc):\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n baseR = np.square(get_values(ppc[\"bus\"][:, BASE_KV], net[\"xward\"][\"bus\"].values, bus_lookup)) / \\\n net.sn_kva * 1e3\n t = np.zeros(shape=(len(net[\"xward\"].index), 5), dtype=np.complex128)\n xw_is = net[\"_is_elements\"][\"xward\"]\n t[:, 0] = bus_lookup[net[\"xward\"][\"bus\"].values]\n t[:, 1] = bus_lookup[net[\"xward\"][\"ad_bus\"].values]\n t[:, 2] = net[\"xward\"][\"r_ohm\"] / baseR\n 
t[:, 3] = net[\"xward\"][\"x_ohm\"] / baseR\n t[:, 4] = xw_is\n return t\n\n\ndef _gather_branch_switch_info(bus, branch_id, branch_type, net):\n # determine at which end the switch is located\n # 1 = to-bus/lv-bus; 0 = from-bus/hv-bus\n branch_id = int(branch_id)\n if branch_type == \"l\":\n branch_bus = net[\"line\"][\"to_bus\"].at[branch_id]\n is_to_bus = int(branch_bus == bus)\n return is_to_bus, bus, net[\"line\"].index.get_loc(branch_id)\n else:\n branch_bus = net[\"trafo\"][\"lv_bus\"].at[branch_id]\n is_to_bus = int(branch_bus == bus)\n return is_to_bus, bus, net[\"trafo\"].index.get_loc(branch_id)\n\n\ndef _switch_branches(net, ppc):\n from pandapower.shortcircuit.idx_bus import C_MIN, C_MAX\n \"\"\"\n Updates the ppc[\"branch\"] matrix with the changed from or to values\n according of the status of switches\n\n **INPUT**:\n **pd_net** - The pandapower format network\n\n **ppc** - The PYPOWER format network to fill in values\n \"\"\"\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n connectivity_check = net[\"_options\"][\"check_connectivity\"]\n mode = net._options[\"mode\"]\n # get in service elements\n _is_elements = net[\"_is_elements\"]\n bus_is_idx = _is_elements['bus_is_idx']\n lines_is_index = _is_elements[\"line_is_idx\"]\n\n # opened bus line switches\n slidx = (net[\"switch\"][\"closed\"].values == 0) \\\n & (net[\"switch\"][\"et\"].values == \"l\")\n\n # check if there are multiple opened switches at a line (-> set line out of service)\n sw_elem = net['switch'][slidx][\"element\"].values\n m = np.zeros_like(sw_elem, dtype=bool)\n m[np.unique(sw_elem, return_index=True)[1]] = True\n\n # if non unique elements are in sw_elem (= multiple opened bus line switches)\n if np.count_nonzero(m) < len(sw_elem):\n if 'line' not in _is_elements:\n get_is_lines(net)\n lines_is = _is_elements['line']\n lines_to_delete = [idx for idx in sw_elem[~m] if idx in lines_is.index]\n\n from_bus = lines_is.loc[lines_to_delete].from_bus.values\n to_bus = lines_is.loc[lines_to_delete].to_bus.values\n # check if branch is already out of service -> ignore switch\n from_bus = from_bus[~np.isnan(from_bus)].astype(int)\n to_bus = to_bus[~np.isnan(to_bus)].astype(int)\n\n # set branch in ppc out of service if from and to bus are at a line which is in service\n if not connectivity_check and from_bus.size and to_bus.size:\n # get from and to buses of these branches\n ppc_from = bus_lookup[from_bus]\n ppc_to = bus_lookup[to_bus]\n ppc_idx = np.in1d(ppc['branch'][:, 0], ppc_from) \\\n & np.in1d(ppc['branch'][:, 1], ppc_to)\n ppc[\"branch\"][ppc_idx, BR_STATUS] = 0\n\n # drop from in service lines as well\n lines_is = lines_is.drop(lines_to_delete)\n _is_elements[\"line_is_idx\"] = lines_is.index\n\n # opened switches at in service lines\n slidx = slidx \\\n & (np.in1d(net[\"switch\"][\"element\"].values, lines_is_index)) \\\n & (np.in1d(net[\"switch\"][\"bus\"].values, bus_is_idx))\n nlo = np.count_nonzero(slidx)\n\n stidx = (net.switch[\"closed\"].values == 0) & (net.switch[\"et\"].values == \"t\")\n nto = np.count_nonzero(stidx)\n\n if (nlo + nto) > 0:\n n_bus = len(ppc[\"bus\"])\n\n if nlo:\n future_buses = [ppc[\"bus\"]]\n line_switches = net[\"switch\"].loc[slidx]\n\n # determine on which side the switch is located\n mapfunc = partial(_gather_branch_switch_info, branch_type=\"l\", net=net)\n ls_info = list(map(mapfunc,\n line_switches[\"bus\"].values,\n line_switches[\"element\"].values))\n # we now have the following matrix\n # 0: 1 if switch is at to_bus, 0 else\n # 1: bus of the switch\n # 2: 
position of the line a switch is connected to\n ls_info = np.array(ls_info, dtype=int)\n\n # build new buses\n new_ls_buses = np.zeros(shape=(nlo, ppc[\"bus\"].shape[1]), dtype=float)\n new_indices = np.arange(n_bus, n_bus + nlo)\n # the newly created buses\n new_ls_buses[:, :15] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.1, 0.9, 0, 0])\n new_ls_buses[:, 0] = new_indices\n new_ls_buses[:, BASE_KV] = get_values(ppc[\"bus\"][:, BASE_KV], ls_info[:, 1], bus_lookup)\n # set voltage of new buses to voltage on other branch end\n to_buses = ppc[\"branch\"][ls_info[ls_info[:, 0].astype(bool), 2], 1].real.astype(int)\n from_buses = ppc[\"branch\"][ls_info[np.logical_not(ls_info[:, 0]), 2], 0].real \\\n .astype(int)\n\n if len(to_buses):\n ix = ls_info[:, 0] == 1\n new_ls_buses[ix, VM] = ppc[\"bus\"][to_buses, VM]\n new_ls_buses[ix, VA] = ppc[\"bus\"][to_buses, VA]\n if mode == \"sc\":\n new_ls_buses[ix, C_MAX] = ppc[\"bus\"][to_buses, C_MAX]\n new_ls_buses[ix, C_MIN] = ppc[\"bus\"][to_buses, C_MIN]\n\n if len(from_buses):\n ix = ls_info[:, 0] == 0\n new_ls_buses[ix, VM] = ppc[\"bus\"][from_buses, VM]\n new_ls_buses[ix, VA] = ppc[\"bus\"][from_buses, VA]\n if mode == \"sc\":\n new_ls_buses[ix, C_MAX] = ppc[\"bus\"][from_buses, C_MAX]\n new_ls_buses[ix, C_MIN] = ppc[\"bus\"][from_buses, C_MIN]\n\n future_buses.append(new_ls_buses)\n # re-route the end of lines to a new bus\n ppc[\"branch\"][ls_info[ls_info[:, 0].astype(bool), 2], 1] = \\\n new_indices[ls_info[:, 0].astype(bool)]\n ppc[\"branch\"][ls_info[np.logical_not(ls_info[:, 0]), 2], 0] = \\\n new_indices[np.logical_not(ls_info[:, 0])]\n\n ppc[\"bus\"] = np.vstack(future_buses)\n\n if nto:\n future_buses = [ppc[\"bus\"]]\n trafo_switches = net[\"switch\"].loc[stidx]\n\n # determine on which side the switch is located\n mapfunc = partial(_gather_branch_switch_info, branch_type=\"t\", net=net)\n ts_info = list(map(mapfunc,\n trafo_switches[\"bus\"].values,\n trafo_switches[\"element\"].values))\n # we now have the following matrix\n # 0: 1 if switch is at lv_bus, 0 else\n # 1: bus of the switch\n # 2: position of the trafo a switch is connected to\n ts_info = np.array(ts_info, dtype=int)\n\n # build new buses\n new_ts_buses = np.zeros(shape=(nto, ppc[\"bus\"].shape[1]), dtype=float)\n new_indices = np.arange(n_bus + nlo, n_bus + nlo + nto)\n new_ts_buses[:, :15] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.1, 0.9, 0, 0])\n new_ts_buses[:, 0] = new_indices\n new_ts_buses[:, BASE_KV] = get_values(ppc[\"bus\"][:, BASE_KV], ts_info[:, 1], bus_lookup)\n # set voltage of new buses to voltage on other branch end\n to_buses = ppc[\"branch\"][ts_info[ts_info[:, 0].astype(bool), 2], 1].real.astype(int)\n from_buses = ppc[\"branch\"][ts_info[np.logical_not(ts_info[:, 0]), 2], 0].real \\\n .astype(int)\n\n # set newly created buses to voltage on other side of\n if len(to_buses):\n ix = ts_info[:, 0] == 1\n taps = ppc[\"branch\"][ts_info[ts_info[:, 0].astype(bool), 2], VA].real\n shift = ppc[\"branch\"][ts_info[ts_info[:, 0].astype(bool), 2], BASE_KV].real\n new_ts_buses[ix, VM] = ppc[\"bus\"][to_buses, VM] * taps\n new_ts_buses[ix, VA] = ppc[\"bus\"][to_buses, VA] + shift\n if mode == \"sc\":\n new_ts_buses[ix, C_MAX] = ppc[\"bus\"][to_buses, C_MAX]\n new_ts_buses[ix, C_MIN] = 0.95 # ppc[\"bus\"][to_buses, C_MIN]\n if len(from_buses):\n ix = ts_info[:, 0] == 0\n taps = ppc[\"branch\"][ts_info[np.logical_not(ts_info[:, 0]), 2], VA].real\n shift = ppc[\"branch\"][ts_info[np.logical_not(ts_info[:, 0]), 2], BASE_KV].real\n new_ts_buses[ix, VM] = 
ppc[\"bus\"][from_buses, VM] * taps\n new_ts_buses[ix, VA] = ppc[\"bus\"][from_buses, VA] + shift\n if mode == \"sc\":\n new_ts_buses[ix, C_MAX] = ppc[\"bus\"][from_buses, C_MAX]\n new_ts_buses[ix, C_MIN] = ppc[\"bus\"][from_buses, C_MIN]\n future_buses.append(new_ts_buses)\n\n # re-route the hv/lv-side of the trafo to a new bus\n # (trafo entries follow line entries)\n at_lv_bus = ts_info[:, 0].astype(bool)\n at_hv_bus = ~at_lv_bus\n ppc[\"branch\"][len(net.line) + ts_info[at_lv_bus, 2], 1] = \\\n new_indices[at_lv_bus]\n ppc[\"branch\"][len(net.line) + ts_info[at_hv_bus, 2], 0] = \\\n new_indices[at_hv_bus]\n\n ppc[\"bus\"] = np.vstack(future_buses)\n\n\ndef _branches_with_oos_buses(net, ppc):\n \"\"\"\n Updates the ppc[\"branch\"] matrix with the changed from or to values\n if the branch is connected to an out of service bus\n\n Adds auxiliary buses if branch is connected to an out of service bus\n Sets branch out of service if connected to two out of service buses\n\n **INPUT**:\n **n** - The pandapower format network\n\n **ppc** - The PYPOWER format network to fill in values\n **bus_is** - The in service buses\n \"\"\"\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n # get in service elements\n _is_elements = net[\"_is_elements\"]\n bus_is_idx = _is_elements['bus_is_idx']\n line_is_idx = _is_elements['line_is_idx']\n\n n_oos_buses = len(net['bus']) - len(bus_is_idx)\n\n # only filter lines at oos buses if oos buses exists\n if n_oos_buses > 0:\n n_bus = len(ppc[\"bus\"])\n future_buses = [ppc[\"bus\"]]\n # out of service buses\n bus_oos = np.setdiff1d(net['bus'].index.values, bus_is_idx)\n # from buses of line\n line_buses = net[\"line\"][[\"from_bus\", \"to_bus\"]].loc[line_is_idx].values\n f_bus = line_buses[:, 0]\n t_bus = line_buses[:, 1]\n\n # determine on which side of the line the oos bus is located\n mask_from = np.in1d(f_bus, bus_oos)\n mask_to = np.in1d(t_bus, bus_oos)\n\n mask_and = mask_to & mask_from\n if np.any(mask_and):\n mask_from[mask_and] = False\n mask_to[mask_and] = False\n\n # get lines that are connected to oos bus at exactly one side\n # buses that are connected to two oos buses will be removed by ext2int\n mask_or = mask_to | mask_from\n # check whether buses are connected to line\n oos_buses_at_lines = np.r_[f_bus[mask_from], t_bus[mask_to]]\n n_oos_buses_at_lines = len(oos_buses_at_lines)\n\n # only if oos_buses are at lines (they could be isolated as well)\n if n_oos_buses_at_lines > 0:\n ls_info = np.zeros((n_oos_buses_at_lines, 3), dtype=int)\n ls_info[:, 0] = mask_to[mask_or] & ~mask_from[mask_or]\n ls_info[:, 1] = oos_buses_at_lines\n ls_info[:, 2] = np.nonzero(np.in1d(net['line'].index, line_is_idx[mask_or]))[0]\n\n # ls_info = list(map(mapfunc,\n # line_switches[\"bus\"].values,\n # line_switches[\"element\"].values))\n # we now have the following matrix\n # 0: 1 if switch is at to_bus, 0 else\n # 1: bus of the switch\n # 2: position of the line a switch is connected to\n # ls_info = np.array(ls_info, dtype=int)\n\n # build new buses\n new_ls_buses = np.zeros(shape=(n_oos_buses_at_lines, ppc[\"bus\"].shape[1]), dtype=float)\n new_indices = np.arange(n_bus, n_bus + n_oos_buses_at_lines)\n # the newly created buses\n new_ls_buses[:, :15] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.1, 0.9, 0, 0])\n new_ls_buses[:, 0] = new_indices\n new_ls_buses[:, BASE_KV] = get_values(ppc[\"bus\"][:, BASE_KV], ls_info[:, 1], bus_lookup)\n\n future_buses.append(new_ls_buses)\n\n # re-route the end of lines to a new bus\n ppc[\"branch\"][ls_info[ls_info[:, 
0].astype(bool), 2], 1] = \\\n new_indices[ls_info[:, 0].astype(bool)]\n ppc[\"branch\"][ls_info[np.logical_not(ls_info[:, 0]), 2], 0] = \\\n new_indices[np.logical_not(ls_info[:, 0])]\n\n ppc[\"bus\"] = np.vstack(future_buses)\n\n\ndef _update_trafo_trafo3w_ppc(net, ppc):\n \"\"\"\n Updates the trafo and trafo3w values when reusing the ppc between two powerflows\n\n :param net: pandapower net\n :param ppc: pypower format\n :return: ppc with updates values\n \"\"\"\n line_end = len(net[\"line\"])\n trafo_end = line_end + len(net[\"trafo\"])\n trafo3w_end = trafo_end + len(net[\"trafo3w\"]) * 3\n\n if trafo_end > line_end:\n ppc[\"branch\"][line_end:trafo_end,\n [F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A]] = \\\n _calc_trafo_parameter(net, ppc)\n if trafo3w_end > trafo_end:\n ppc[\"branch\"][trafo_end:trafo3w_end, [F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS]] = \\\n _calc_trafo3w_parameter(net, ppc)\n\n\ndef _calc_switch_parameter(net, ppc):\n \"\"\"\n calculates the line parameter in per unit.\n\n **INPUT**:\n **net** -The pandapower format network\n\n **RETURN**:\n **t** - Temporary line parameter. Which is a complex128\n Nunmpy array. with the following order:\n 0:bus_a; 1:bus_b; 2:r_pu; 3:x_pu; 4:b_pu\n \"\"\"\n r_switch = net[\"_options\"][\"r_switch\"]\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n switch = net.switch[net._closed_bb_switches]\n fb = bus_lookup[switch[\"bus\"].values]\n tb = bus_lookup[switch[\"element\"].values]\n baseR = np.square(ppc[\"bus\"][fb, BASE_KV]) / net.sn_kva * 1e3\n t = np.zeros(shape=(len(switch), 3), dtype=np.complex128)\n\n t[:, 0] = fb\n t[:, 1] = tb\n\n t[:, 2] = r_switch / baseR\n return t\n\n\ndef _end_temperature_correction_factor(net):\n if \"endtemp_degree\" not in net.line:\n raise UserWarning(\"Specify end temperature for lines in net.endtemp_degree\")\n return (1 + .004 * (net.line.endtemp_degree.values.astype(float) - 20)) # formula from standard\n\n\ndef _transformer_correction_factor(vsc, vscr, sn, cmax):\n sn = sn / 1000.\n zt = vsc / 100 / sn\n rt = vscr / 100 / sn\n xt = np.sqrt(zt ** 2 - rt ** 2)\n kt = 0.95 * cmax / (1 + .6 * xt * sn)\n return kt\n\n\ndef get_is_lines(net):\n _is_elements = net[\"_is_elements\"]\n _is_elements[\"line\"] = net[\"line\"][net[\"line\"][\"in_service\"].values.astype(bool)]\n", "path": "pandapower/build_branch.py" } ]
diff --git a/pandapower/build_branch.py b/pandapower/build_branch.py index ca16f704a..f8ee1e0be 100644 --- a/pandapower/build_branch.py +++ b/pandapower/build_branch.py @@ -497,6 +497,8 @@ def _trafo_df_from_trafo3w(net): i += 1 trafo_df = pd.DataFrame.from_dict(trafos2w, orient="index") + if any(trafo_df.vsc_percent==0): + raise UserWarning("Equivalent transformer with zero impedance!") return trafo_df
replicate__cog-653
Support lists of `BaseModel` for outputs The following model fails on Replicate.com with a cryptic `Can't pickle <class 'predict.Output'>: it's not the same object as predict.Output` error: ```python class Output(BaseModel): foo: str bar: str class Predictor(BasePredictor): def predict(self) -> List[Output]: return [Output(foo="foo", bar="bar")] ``` This is [documented deep in Cog's Python layer](https://github.com/replicate/cog/blob/main/python/cog/server/runner.py#L238). We should support this type of output.
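A minimal, self-contained sketch of the direction this suggests: have the `make_pickleable` helper in `python/cog/server/runner.py` recurse into list outputs so each `BaseModel` element is converted to a plain dict before being sent through the pipe. The standalone module layout and the `__main__` usage below are illustrative only, not the actual Cog code path; it assumes pydantic v1 (`BaseModel.dict(exclude_unset=True)`).

```python
from typing import Any

from pydantic import BaseModel


class Output(BaseModel):
    foo: str
    bar: str


def make_pickleable(obj: Any) -> Any:
    """Convert BaseModel instances (and lists of them) into plain picklable data."""
    if isinstance(obj, BaseModel):
        # Plain dicts pickle fine across the process boundary.
        return obj.dict(exclude_unset=True)
    elif isinstance(obj, list):
        # Recurse so List[Output] return values survive the trip through the pipe.
        return [make_pickleable(item) for item in obj]
    else:
        return obj


if __name__ == "__main__":
    print(make_pickleable([Output(foo="foo", bar="bar")]))
    # -> [{'foo': 'foo', 'bar': 'bar'}]
```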
[ { "content": "import multiprocessing\nimport types\nfrom enum import Enum\nfrom multiprocessing.connection import Connection\nfrom typing import Any, Dict, List, Optional\n\nfrom pydantic import BaseModel\n\nfrom ..predictor import load_config, load_predictor\nfrom .log_capture import capture_log\n\n\nclass PredictionRunner:\n PROCESSING_DONE = 1\n\n class OutputType(Enum):\n NOT_STARTED = 0\n SINGLE = 1\n GENERATOR = 2\n\n def __init__(self) -> None:\n self.logs_pipe_reader, self.logs_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n (\n self.prediction_input_pipe_reader,\n self.prediction_input_pipe_writer,\n ) = multiprocessing.Pipe(duplex=False)\n self.predictor_pipe_reader, self.predictor_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.error_pipe_reader, self.error_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.done_pipe_reader, self.done_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n\n def setup(self) -> None:\n \"\"\"\n Sets up the predictor in a subprocess. Blocks until the predictor has\n finished setup. To start a prediction after setup call `run()`.\n \"\"\"\n # `multiprocessing.get_context(\"spawn\")` returns the same API as\n # `multiprocessing`, but will use the spawn method when creating any\n # subprocess. Using the spawn method for the predictor subprocess is\n # useful for compatibility with CUDA, which cannot run in a process\n # that gets forked. If we can guarantee that all initialization happens\n # within the subprocess, we could probably get away with using fork\n # here instead.\n self.predictor_process = multiprocessing.get_context(\"spawn\").Process(\n target=self._start_predictor_process\n )\n\n self._is_processing = True\n self.predictor_process.start()\n\n # poll with an infinite timeout to avoid burning resources in the loop\n while self.done_pipe_reader.poll(timeout=None) and self.is_processing():\n pass\n\n def _start_predictor_process(self) -> None:\n config = load_config()\n self.predictor = load_predictor(config)\n self.predictor.setup()\n\n # tell the main process we've finished setup\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n while True:\n try:\n prediction_input = self.prediction_input_pipe_reader.recv()\n self._run_prediction(prediction_input)\n except EOFError:\n continue\n\n def run(self, **prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Starts running a prediction in the predictor subprocess, using the\n inputs provided in `prediction_input`.\n\n The subprocess will send prediction output and logs to pipes as soon as\n they're available. You can check if the pipes have any data using\n `has_output_waiting()` and `has_logs_waiting()`. 
You can read data from\n the pipes using `read_output()` and `read_logs()`.\n\n Use `is_processing()` to check whether more data is expected in the\n pipe for prediction output.\n \"\"\"\n # We're starting processing!\n self._is_processing = True\n\n # We don't know whether or not we've got a generator (progressive\n # output) until we start getting output from the model\n self._is_output_generator = self.OutputType.NOT_STARTED\n\n # We haven't encountered an error yet\n self._error = None\n\n # Send prediction input through the pipe to the predictor subprocess\n self.prediction_input_pipe_writer.send(prediction_input)\n\n def is_processing(self) -> bool:\n \"\"\"\n Returns True if the subprocess running the prediction is still\n processing.\n \"\"\"\n if self.done_pipe_reader.poll():\n try:\n if self.done_pipe_reader.recv() == self.PROCESSING_DONE:\n self._is_processing = False\n except EOFError:\n pass\n\n return self._is_processing\n\n def has_output_waiting(self) -> bool:\n return self.predictor_pipe_reader.poll()\n\n def read_output(self) -> List[Any]:\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return []\n\n output = []\n while self.has_output_waiting():\n try:\n output.append(self.predictor_pipe_reader.recv())\n except EOFError:\n break\n return output\n\n def has_logs_waiting(self) -> bool:\n return self.logs_pipe_reader.poll()\n\n def read_logs(self) -> List[str]:\n logs = []\n while self.has_logs_waiting():\n try:\n logs.append(self.logs_pipe_reader.recv())\n except EOFError:\n break\n return logs\n\n def is_output_generator(self) -> Optional[bool]:\n \"\"\"\n Returns `True` if the output is a generator, `False` if it's not, and\n `None` if we don't know yet.\n \"\"\"\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n if self.has_output_waiting():\n # if there's output waiting use the first one to set whether\n # we've got a generator, with a safety check\n self._is_output_generator = self.predictor_pipe_reader.recv()\n assert isinstance(self._is_output_generator, self.OutputType)\n\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return None\n elif self._is_output_generator is self.OutputType.SINGLE:\n return False\n elif self._is_output_generator is self.OutputType.GENERATOR:\n return True\n\n def _run_prediction(self, prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Sends a boolean first, to indicate whether the output is a generator.\n After that it sends the output(s).\n\n If the predictor raises an exception it'll send it to the error pipe\n writer and then exit.\n\n When the prediction is finished it'll send a token to the done pipe.\n \"\"\"\n # Empty all the pipes before we start sending more messages to them\n drain_pipe(self.logs_pipe_reader)\n drain_pipe(self.predictor_pipe_reader)\n drain_pipe(self.error_pipe_reader)\n drain_pipe(self.done_pipe_reader)\n\n with capture_log(self.logs_pipe_writer):\n try:\n output = self.predictor.predict(**prediction_input)\n\n if isinstance(output, types.GeneratorType):\n self.predictor_pipe_writer.send(self.OutputType.GENERATOR)\n while True:\n try:\n self.predictor_pipe_writer.send(\n next(make_pickleable(output))\n )\n except StopIteration:\n break\n else:\n self.predictor_pipe_writer.send(self.OutputType.SINGLE)\n self.predictor_pipe_writer.send(make_pickleable(output))\n except Exception as e:\n self.error_pipe_writer.send(e)\n\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n def error(self) -> Optional[str]:\n \"\"\"\n Returns the error encountered by the predictor, 
if one exists.\n \"\"\"\n if self._error is None and self.error_pipe_reader.poll():\n try:\n self._error = self.error_pipe_reader.recv()\n except EOFError:\n # I don't know how this is reachable ¯\\_(ツ)_/¯\n pass\n\n return self._error\n\n\ndef drain_pipe(pipe_reader: Connection) -> None:\n \"\"\"\n Reads all available messages from a pipe and discards them. This serves to\n clear the pipe for future usage.\n \"\"\"\n while pipe_reader.poll():\n try:\n pipe_reader.recv()\n except EOFError:\n break\n\n\ndef make_pickleable(obj: Any) -> Any:\n \"\"\"\n Returns a version of `obj` which can be pickled and therefore sent through\n the pipe to the main process.\n\n If the predictor uses a custom output like:\n\n class Output(BaseModel):\n text: str\n\n then the output can't be sent through the pipe because:\n\n > Can't pickle <class 'predict.Output'>: it's not the same object as\n > 'predict.Output'\n\n The way we're getting around this here will only work for singly-nested\n outputs. If there's a complex object inside a complex object, it's likely\n to fall over.\n\n A better fix for this would be to work out why the pickling process is\n getting a different class when loading `Output`, so the pickling Just\n Works.\n \"\"\"\n if isinstance(obj, BaseModel):\n return obj.dict(exclude_unset=True)\n else:\n return obj\n", "path": "python/cog/server/runner.py" } ]
[ { "content": "import multiprocessing\nimport types\nfrom enum import Enum\nfrom multiprocessing.connection import Connection\nfrom typing import Any, Dict, List, Optional\n\nfrom pydantic import BaseModel\n\nfrom ..predictor import load_config, load_predictor\nfrom .log_capture import capture_log\n\n\nclass PredictionRunner:\n PROCESSING_DONE = 1\n\n class OutputType(Enum):\n NOT_STARTED = 0\n SINGLE = 1\n GENERATOR = 2\n\n def __init__(self) -> None:\n self.logs_pipe_reader, self.logs_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n (\n self.prediction_input_pipe_reader,\n self.prediction_input_pipe_writer,\n ) = multiprocessing.Pipe(duplex=False)\n self.predictor_pipe_reader, self.predictor_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.error_pipe_reader, self.error_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.done_pipe_reader, self.done_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n\n def setup(self) -> None:\n \"\"\"\n Sets up the predictor in a subprocess. Blocks until the predictor has\n finished setup. To start a prediction after setup call `run()`.\n \"\"\"\n # `multiprocessing.get_context(\"spawn\")` returns the same API as\n # `multiprocessing`, but will use the spawn method when creating any\n # subprocess. Using the spawn method for the predictor subprocess is\n # useful for compatibility with CUDA, which cannot run in a process\n # that gets forked. If we can guarantee that all initialization happens\n # within the subprocess, we could probably get away with using fork\n # here instead.\n self.predictor_process = multiprocessing.get_context(\"spawn\").Process(\n target=self._start_predictor_process\n )\n\n self._is_processing = True\n self.predictor_process.start()\n\n # poll with an infinite timeout to avoid burning resources in the loop\n while self.done_pipe_reader.poll(timeout=None) and self.is_processing():\n pass\n\n def _start_predictor_process(self) -> None:\n config = load_config()\n self.predictor = load_predictor(config)\n self.predictor.setup()\n\n # tell the main process we've finished setup\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n while True:\n try:\n prediction_input = self.prediction_input_pipe_reader.recv()\n self._run_prediction(prediction_input)\n except EOFError:\n continue\n\n def run(self, **prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Starts running a prediction in the predictor subprocess, using the\n inputs provided in `prediction_input`.\n\n The subprocess will send prediction output and logs to pipes as soon as\n they're available. You can check if the pipes have any data using\n `has_output_waiting()` and `has_logs_waiting()`. 
You can read data from\n the pipes using `read_output()` and `read_logs()`.\n\n Use `is_processing()` to check whether more data is expected in the\n pipe for prediction output.\n \"\"\"\n # We're starting processing!\n self._is_processing = True\n\n # We don't know whether or not we've got a generator (progressive\n # output) until we start getting output from the model\n self._is_output_generator = self.OutputType.NOT_STARTED\n\n # We haven't encountered an error yet\n self._error = None\n\n # Send prediction input through the pipe to the predictor subprocess\n self.prediction_input_pipe_writer.send(prediction_input)\n\n def is_processing(self) -> bool:\n \"\"\"\n Returns True if the subprocess running the prediction is still\n processing.\n \"\"\"\n if self.done_pipe_reader.poll():\n try:\n if self.done_pipe_reader.recv() == self.PROCESSING_DONE:\n self._is_processing = False\n except EOFError:\n pass\n\n return self._is_processing\n\n def has_output_waiting(self) -> bool:\n return self.predictor_pipe_reader.poll()\n\n def read_output(self) -> List[Any]:\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return []\n\n output = []\n while self.has_output_waiting():\n try:\n output.append(self.predictor_pipe_reader.recv())\n except EOFError:\n break\n return output\n\n def has_logs_waiting(self) -> bool:\n return self.logs_pipe_reader.poll()\n\n def read_logs(self) -> List[str]:\n logs = []\n while self.has_logs_waiting():\n try:\n logs.append(self.logs_pipe_reader.recv())\n except EOFError:\n break\n return logs\n\n def is_output_generator(self) -> Optional[bool]:\n \"\"\"\n Returns `True` if the output is a generator, `False` if it's not, and\n `None` if we don't know yet.\n \"\"\"\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n if self.has_output_waiting():\n # if there's output waiting use the first one to set whether\n # we've got a generator, with a safety check\n self._is_output_generator = self.predictor_pipe_reader.recv()\n assert isinstance(self._is_output_generator, self.OutputType)\n\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return None\n elif self._is_output_generator is self.OutputType.SINGLE:\n return False\n elif self._is_output_generator is self.OutputType.GENERATOR:\n return True\n\n def _run_prediction(self, prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Sends a boolean first, to indicate whether the output is a generator.\n After that it sends the output(s).\n\n If the predictor raises an exception it'll send it to the error pipe\n writer and then exit.\n\n When the prediction is finished it'll send a token to the done pipe.\n \"\"\"\n # Empty all the pipes before we start sending more messages to them\n drain_pipe(self.logs_pipe_reader)\n drain_pipe(self.predictor_pipe_reader)\n drain_pipe(self.error_pipe_reader)\n drain_pipe(self.done_pipe_reader)\n\n with capture_log(self.logs_pipe_writer):\n try:\n output = self.predictor.predict(**prediction_input)\n\n if isinstance(output, types.GeneratorType):\n self.predictor_pipe_writer.send(self.OutputType.GENERATOR)\n while True:\n try:\n self.predictor_pipe_writer.send(\n next(make_pickleable(output))\n )\n except StopIteration:\n break\n else:\n self.predictor_pipe_writer.send(self.OutputType.SINGLE)\n self.predictor_pipe_writer.send(make_pickleable(output))\n except Exception as e:\n self.error_pipe_writer.send(e)\n\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n def error(self) -> Optional[str]:\n \"\"\"\n Returns the error encountered by the predictor, 
if one exists.\n \"\"\"\n if self._error is None and self.error_pipe_reader.poll():\n try:\n self._error = self.error_pipe_reader.recv()\n except EOFError:\n # I don't know how this is reachable ¯\\_(ツ)_/¯\n pass\n\n return self._error\n\n\ndef drain_pipe(pipe_reader: Connection) -> None:\n \"\"\"\n Reads all available messages from a pipe and discards them. This serves to\n clear the pipe for future usage.\n \"\"\"\n while pipe_reader.poll():\n try:\n pipe_reader.recv()\n except EOFError:\n break\n\n\ndef make_pickleable(obj: Any) -> Any:\n \"\"\"\n Returns a version of `obj` which can be pickled and therefore sent through\n the pipe to the main process.\n\n If the predictor uses a custom output like:\n\n class Output(BaseModel):\n text: str\n\n then the output can't be sent through the pipe because:\n\n > Can't pickle <class 'predict.Output'>: it's not the same object as\n > 'predict.Output'\n\n The way we're getting around this here will only work for singly-nested\n outputs. If there's a complex object inside a complex object, it's likely\n to fall over.\n\n A better fix for this would be to work out why the pickling process is\n getting a different class when loading `Output`, so the pickling Just\n Works.\n \"\"\"\n if isinstance(obj, BaseModel):\n return obj.dict(exclude_unset=True)\n elif isinstance(obj, List):\n return [make_pickleable(item) for item in obj]\n else:\n return obj\n", "path": "python/cog/server/runner.py" } ]
diff --git a/python/cog/server/runner.py b/python/cog/server/runner.py index 1df297e4ef..ade25f43de 100644 --- a/python/cog/server/runner.py +++ b/python/cog/server/runner.py @@ -248,5 +248,7 @@ class Output(BaseModel): """ if isinstance(obj, BaseModel): return obj.dict(exclude_unset=True) + elif isinstance(obj, List): + return [make_pickleable(item) for item in obj] else: return obj
gratipay__gratipay.com-1237
ImportError: cannot import name Participant I get this when running some of the test scripts individually.
[ { "content": "\"\"\"Defines a Participant class.\n\"\"\"\nimport random\nimport re\nimport uuid\nfrom decimal import Decimal\n\nimport gittip\nfrom aspen import Response\nfrom aspen.utils import typecheck\nfrom psycopg2 import IntegrityError\nfrom gittip.models import community\n\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \".,-_;:@ \")\n\n\nclass NoParticipantId(Exception):\n \"\"\"Represent a bug where we treat an anonymous user as a participant.\n \"\"\"\n\n\nclass NeedConfirmation(Exception):\n \"\"\"We need confirmation before we'll proceed.\n \"\"\"\n\n def __init__(self, a, b, c):\n self.other_is_a_real_participant = a\n self.this_is_others_last_account_elsewhere = b\n self.we_already_have_that_kind_of_account = c\n self._all = (a, b, c)\n\n def __repr__(self):\n return \"<NeedConfirmation: %r %r %r>\" % self._all\n __str__ = __repr__\n\n def __eq__(self, other):\n return self._all == other._all\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __nonzero__(self):\n # bool(need_confirmation)\n A, B, C = self._all\n return A or C\n\n\ndef gen_random_usernames():\n \"\"\"Yield up to 100 random usernames.\n \"\"\"\n seatbelt = 0\n while 1:\n yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')\n seatbelt += 1\n if seatbelt > 100:\n raise StopIteration\n\n\ndef reserve_a_random_username(db=None):\n \"\"\"Reserve and a random username.\n\n The returned value is guaranteed to have been reserved in the database.\n\n \"\"\"\n if db is None: # During take_over we want to use our own transaction.\n db = gittip.db\n\n for username in gen_random_usernames():\n try:\n db.execute( \"INSERT INTO participants (username, username_lower) \"\n \"VALUES (%s, %s)\"\n , (username, username.lower())\n )\n except IntegrityError: # Collision, try again with another value.\n pass\n else:\n break\n\n return username\n\n\ndef require_username(func):\n # XXX This should be done with a metaclass, maybe?\n def wrapped(self, *a, **kw):\n if self.username is None:\n raise NoParticipantId(\"User does not participate, apparently.\")\n return func(self, *a, **kw)\n return wrapped\n\n\nclass Participant(object):\n \"\"\"Represent a Gittip participant.\n \"\"\"\n\n class NoSelfTipping(Exception): pass\n class BadAmount(Exception): pass\n\n\n def __init__(self, username):\n typecheck(username, (unicode, None))\n self.username = username\n\n\n @require_username\n def get_details(self):\n \"\"\"Return a dictionary.\n \"\"\"\n SELECT = \"\"\"\n\n SELECT *\n FROM participants\n WHERE username = %s\n\n \"\"\"\n return gittip.db.fetchone(SELECT, (self.username,))\n\n\n # API Key\n # =======\n\n @require_username\n def recreate_api_key(self):\n api_key = str(uuid.uuid4())\n SQL = \"UPDATE participants SET api_key=%s WHERE username=%s\"\n gittip.db.execute(SQL, (api_key, self.username))\n return api_key\n\n\n # Claiming\n # ========\n # An unclaimed Participant is a stub that's created when someone pledges to\n # give to an AccountElsewhere that's not been connected on Gittip yet.\n\n @require_username\n def resolve_unclaimed(self):\n \"\"\"Given a username, return an URL path.\n \"\"\"\n rec = gittip.db.fetchone(\"SELECT platform, user_info FROM elsewhere \"\n \"WHERE participant = %s\", (self.username,))\n if rec is None:\n out = None\n elif rec['platform'] == 'github':\n out = '/on/github/%s/' % rec['user_info']['login']\n else:\n assert rec['platform'] == 'twitter'\n out = '/on/twitter/%s/' % 
rec['user_info']['screen_name']\n return out\n\n @require_username\n def set_as_claimed(self):\n CLAIM = \"\"\"\\\n\n UPDATE participants\n SET claimed_time=CURRENT_TIMESTAMP\n WHERE username=%s\n AND claimed_time IS NULL\n\n \"\"\"\n gittip.db.execute(CLAIM, (self.username,))\n\n @require_username\n def insert_into_communities(self, is_member, name, slug):\n username = self.username\n gittip.db.execute(\"\"\"\n\n INSERT INTO communities\n (ctime, name, slug, participant, is_member)\n VALUES ( COALESCE (( SELECT ctime\n FROM communities\n WHERE (participant=%s AND slug=%s)\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %s, %s, %s, %s\n )\n RETURNING ( SELECT count(*) = 0\n FROM communities\n WHERE participant=%s\n )\n AS first_time_community\n\n \"\"\", (username, slug, name, slug, username, is_member, username))\n\n @require_username\n def change_username(self, suggested):\n \"\"\"Raise Response or return None.\n\n We want to be pretty loose with usernames. Unicode is allowed--XXX\n aspen bug :(. So are spaces.Control characters aren't. We also limit to\n 32 characters in length.\n\n \"\"\"\n for i, c in enumerate(suggested):\n if i == 32:\n raise Response(413) # Request Entity Too Large (more or less)\n elif ord(c) < 128 and c not in ASCII_ALLOWED_IN_USERNAME:\n raise Response(400) # Yeah, no.\n elif c not in ASCII_ALLOWED_IN_USERNAME:\n raise Response(400) # XXX Burned by an Aspen bug. :`-(\n # https://github.com/whit537/aspen/issues/102\n\n if suggested in gittip.RESTRICTED_USERNAMES:\n raise Response(400)\n\n if suggested != self.username:\n # Will raise IntegrityError if the desired username is taken.\n rec = gittip.db.fetchone(\"UPDATE participants \"\n \"SET username=%s WHERE username=%s \"\n \"RETURNING username\",\n (suggested, self.username))\n\n assert rec is not None # sanity check\n assert suggested == rec['username'] # sanity check\n self.username = suggested\n\n\n @require_username\n def get_accounts_elsewhere(self):\n \"\"\"Return a two-tuple of elsewhere dicts.\n \"\"\"\n ACCOUNTS = \"\"\"\n SELECT * FROM elsewhere WHERE participant=%s;\n \"\"\"\n accounts = gittip.db.fetchall(ACCOUNTS, (self.username,))\n assert accounts is not None\n twitter_account = None\n github_account = None\n for account in accounts:\n if account['platform'] == 'github':\n github_account = account\n else:\n assert account['platform'] == 'twitter', account['platform']\n twitter_account = account\n return (github_account, twitter_account)\n\n\n @require_username\n def set_tip_to(self, tippee, amount):\n \"\"\"Given participant id and amount as str, return a tuple.\n\n We INSERT instead of UPDATE, so that we have history to explore. The\n COALESCE function returns the first of its arguments that is not NULL.\n The effect here is to stamp all tips with the timestamp of the first\n tip from this user to that. 
I believe this is used to determine the\n order of transfers during payday.\n\n The tuple returned is the amount as a Decimal and a boolean indicating\n whether this is the first time this tipper has tipped (we want to track\n that as part of our conversion funnel).\n\n \"\"\"\n\n if self.username == tippee:\n raise self.NoSelfTipping\n\n amount = Decimal(amount) # May raise InvalidOperation\n hi = gittip.AMOUNTS[0]\n lo = gittip.AMOUNTS[-1]\n if (amount < lo) or (amount > hi):\n raise self.BadAmount\n\n NEW_TIP = \"\"\"\\\n\n INSERT INTO tips\n (ctime, tipper, tippee, amount)\n VALUES ( COALESCE (( SELECT ctime\n FROM tips\n WHERE (tipper=%s AND tippee=%s)\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %s, %s, %s\n )\n RETURNING ( SELECT count(*) = 0 FROM tips WHERE tipper=%s )\n AS first_time_tipper\n\n \"\"\"\n args = (self.username, tippee, self.username, tippee, amount, \\\n self.username)\n first_time_tipper = \\\n gittip.db.fetchone(NEW_TIP, args)['first_time_tipper']\n return amount, first_time_tipper\n\n\n @require_username\n def get_tip_to(self, tippee):\n \"\"\"Given two user ids, return a Decimal.\n \"\"\"\n TIP = \"\"\"\\\n\n SELECT amount\n FROM tips\n WHERE tipper=%s\n AND tippee=%s\n ORDER BY mtime DESC\n LIMIT 1\n\n \"\"\"\n rec = gittip.db.fetchone(TIP, (self.username, tippee))\n if rec is None:\n tip = Decimal('0.00')\n else:\n tip = rec['amount']\n return tip\n\n\n @require_username\n def get_dollars_receiving(self):\n \"\"\"Return a Decimal.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT sum(amount) AS dollars_receiving\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.username = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n AND is_suspicious IS NOT true\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n\n \"\"\"\n rec = gittip.db.fetchone(BACKED, (self.username,))\n if rec is None:\n amount = None\n else:\n amount = rec['dollars_receiving'] # might be None\n\n if amount is None:\n amount = Decimal('0.00')\n\n return amount\n\n\n @require_username\n def get_dollars_giving(self):\n \"\"\"Return a Decimal.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT sum(amount) AS dollars_giving\n FROM ( SELECT DISTINCT ON (tippee)\n amount\n , tippee\n FROM tips\n JOIN participants p ON p.username = tippee\n WHERE tipper=%s\n AND is_suspicious IS NOT true\n AND claimed_time IS NOT NULL\n ORDER BY tippee\n , mtime DESC\n ) AS foo\n\n \"\"\"\n rec = gittip.db.fetchone(BACKED, (self.username,))\n if rec is None:\n amount = None\n else:\n amount = rec['dollars_giving'] # might be None\n\n if amount is None:\n amount = Decimal('0.00')\n\n return amount\n\n\n @require_username\n def get_number_of_backers(self):\n \"\"\"Given a unicode, return an int.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT count(amount) AS nbackers\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.username = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n AND is_suspicious IS NOT true\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n WHERE amount > 0\n\n \"\"\"\n rec = gittip.db.fetchone(BACKED, (self.username,))\n if rec is None:\n nbackers = None\n else:\n nbackers = rec['nbackers'] # might be None\n\n if nbackers is None:\n nbackers = 0\n\n return nbackers\n\n\n @require_username\n def get_tip_distribution(self):\n SQL = \"\"\"\n\n SELECT amount\n , count(amount) AS ncontributing\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.username = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n 
AND is_suspicious IS NOT true\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n WHERE amount > 0\n GROUP BY amount\n ORDER BY amount\n\n \"\"\"\n npatrons = 0.0 # float to trigger float division\n contributed = Decimal('0.00')\n other = [-1, 0, 0] # accumulates old tip amounts\n out = []\n for rec in gittip.db.fetchall(SQL, (self.username,)):\n if rec['amount'] not in gittip.AMOUNTS:\n other[1] += rec['ncontributing']\n other[2] += rec['amount'] * rec['ncontributing']\n contributed += rec['amount'] * rec['ncontributing']\n else:\n out.append([ rec['amount']\n , rec['ncontributing']\n , rec['amount'] * rec['ncontributing']\n ])\n contributed += out[-1][2]\n npatrons += rec['ncontributing']\n if other != [-1, 0, 0]:\n out.append(other)\n for row in out:\n row.append((row[1] / npatrons) if npatrons > 0 else 0)\n row.append((row[2] / contributed) if contributed > 0 else 0)\n return out, npatrons, contributed\n\n\n @require_username\n def get_giving_for_profile(self, db=None):\n \"\"\"Given a participant id and a date, return a list and a Decimal.\n\n This function is used to populate a participant's page for their own\n viewing pleasure.\n\n A half-injected dependency, that's what db is.\n\n \"\"\"\n if db is None:\n from gittip import db\n\n TIPS = \"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , p.claimed_time\n , p.username_lower\n FROM tips t\n JOIN participants p ON p.username = t.tippee\n WHERE tipper = %s\n AND p.is_suspicious IS NOT true\n AND p.claimed_time IS NOT NULL\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY amount DESC\n , username_lower\n\n \"\"\"\n tips = list(db.fetchall(TIPS, (self.username,)))\n\n UNCLAIMED_TIPS = \"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , p.claimed_time\n , e.platform\n , e.user_info\n FROM tips t\n JOIN participants p ON p.username = t.tippee\n JOIN elsewhere e ON e.participant = t.tippee\n WHERE tipper = %s\n AND p.is_suspicious IS NOT true\n AND p.claimed_time IS NULL\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY amount DESC\n , lower(user_info->'screen_name')\n , lower(user_info->'username')\n , lower(user_info->'login')\n\n \"\"\"\n unclaimed_tips = list(db.fetchall(UNCLAIMED_TIPS, (self.username,)))\n\n\n # Compute the total.\n # ==================\n # For payday we only want to process payments to tippees who have\n # themselves opted into Gittip. For the tipper's profile page we want\n # to show the total amount they've pledged (so they're not surprised\n # when someone *does* start accepting tips and all of a sudden they're\n # hit with bigger charges.\n\n total = sum([t['amount'] for t in tips])\n if not total:\n # If tips is an empty list, total is int 0. We want a Decimal.\n total = Decimal('0.00')\n\n unclaimed_total = sum([t['amount'] for t in unclaimed_tips])\n if not unclaimed_total:\n unclaimed_total = Decimal('0.00')\n\n return tips, total, unclaimed_tips, unclaimed_total\n\n\n @require_username\n def get_tips_and_total(self, for_payday=False, db=None):\n \"\"\"Given a participant id and a date, return a list and a Decimal.\n\n This function is used by the payday function. If for_payday is not\n False it must be a date object. Originally we also used this function\n to populate the profile page, but our requirements there changed while,\n oddly, our requirements in payday *also* changed to match the old\n requirements of the profile page. 
So this function keeps the for_payday\n parameter after all.\n\n A half-injected dependency, that's what db is.\n\n \"\"\"\n if db is None:\n from gittip import db\n\n if for_payday:\n\n # For payday we want the oldest relationship to be paid first.\n order_by = \"ctime ASC\"\n\n\n # This is where it gets crash-proof.\n # ==================================\n # We need to account for the fact that we may have crashed during\n # Payday and we're re-running that function. We only want to select\n # tips that existed before Payday started, but haven't been\n # processed as part of this Payday yet.\n #\n # It's a bug if the paydays subselect returns > 1 rows.\n #\n # XXX If we crash during Payday and we rerun it after a timezone\n # change, will we get burned? How?\n\n ts_filter = \"\"\"\\\n\n AND mtime < %s\n AND ( SELECT id\n FROM transfers\n WHERE tipper=t.tipper\n AND tippee=t.tippee\n AND timestamp >= %s\n ) IS NULL\n\n \"\"\"\n args = (self.username, for_payday, for_payday)\n else:\n order_by = \"amount DESC\"\n ts_filter = \"\"\n args = (self.username,)\n\n TIPS = \"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , p.claimed_time\n FROM tips t\n JOIN participants p ON p.username = t.tippee\n WHERE tipper = %%s\n AND p.is_suspicious IS NOT true\n %s\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY %s\n , tippee\n\n \"\"\" % (ts_filter, order_by) # XXX, No injections here, right?!\n tips = list(db.fetchall(TIPS, args))\n\n\n # Compute the total.\n # ==================\n # For payday we only want to process payments to tippees who have\n # themselves opted into Gittip. For the tipper's profile page we want\n # to show the total amount they've pledged (so they're not surprised\n # when someone *does* start accepting tips and all of a sudden they're\n # hit with bigger charges.\n\n if for_payday:\n to_total = [t for t in tips if t['claimed_time'] is not None]\n else:\n to_total = tips\n total = sum([t['amount'] for t in to_total])\n\n if not total:\n # If to_total is an empty list, total is int 0. We want a Decimal.\n total = Decimal('0.00')\n\n return tips, total\n\n\n\n # Accounts Elsewhere\n # ==================\n\n @require_username\n def take_over(self, account_elsewhere, have_confirmation=False):\n \"\"\"Given two unicodes, raise WontProceed or return None.\n\n This method associates an account on another platform (GitHub, Twitter,\n etc.) with the Gittip participant represented by self. Every account\n elsewhere has an associated Gittip participant account, even if its\n only a stub participant (it allows us to track pledges to that account\n should they ever decide to join Gittip).\n\n In certain circumstances, we want to present the user with a\n confirmation before proceeding to reconnect the account elsewhere to\n the new Gittip account; NeedConfirmation is the signal to request\n confirmation. If it was the last account elsewhere connected to the old\n Gittip account, then we absorb the old Gittip account into the new one,\n effectively archiving the old account.\n\n Here's what absorbing means:\n\n - consolidated tips to and fro are set up for the new participant\n\n Amounts are summed, so if alice tips bob $1 and carl $1, and\n then bob absorbs carl, then alice tips bob $2(!) and carl $0.\n\n And if bob tips alice $1 and carl tips alice $1, and then bob\n absorbs carl, then bob tips alice $2(!) 
and carl tips alice $0.\n\n The ctime of each new consolidated tip is the older of the two\n tips that are being consolidated.\n\n If alice tips bob $1, and alice absorbs bob, then alice tips\n bob $0.\n\n If alice tips bob $1, and bob absorbs alice, then alice tips\n bob $0.\n\n - all tips to and from the other participant are set to zero\n - the absorbed username is released for reuse\n - the absorption is recorded in an absorptions table\n\n This is done in one transaction.\n\n \"\"\"\n platform = account_elsewhere.platform\n user_id = account_elsewhere.user_id\n\n typecheck(platform, unicode, user_id, unicode, have_confirmation, bool)\n\n CONSOLIDATE_TIPS_RECEIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), tipper, %s AS tippee, sum(amount)\n FROM ( SELECT DISTINCT ON (tipper, tippee)\n ctime, tipper, tippee, amount\n FROM tips\n ORDER BY tipper, tippee, mtime DESC\n ) AS unique_tips\n WHERE (tippee=%s OR tippee=%s)\n AND NOT (tipper=%s AND tippee=%s)\n AND NOT (tipper=%s)\n GROUP BY tipper\n\n \"\"\"\n\n CONSOLIDATE_TIPS_GIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), %s AS tipper, tippee, sum(amount)\n FROM ( SELECT DISTINCT ON (tipper, tippee)\n ctime, tipper, tippee, amount\n FROM tips\n ORDER BY tipper, tippee, mtime DESC\n ) AS unique_tips\n WHERE (tipper=%s OR tipper=%s)\n AND NOT (tipper=%s AND tippee=%s)\n AND NOT (tippee=%s)\n GROUP BY tippee\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_RECEIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT DISTINCT ON (tipper) ctime, tipper, tippee, 0 AS amount\n FROM tips\n WHERE tippee=%s\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_GIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT DISTINCT ON (tippee) ctime, tipper, tippee, 0 AS amount\n FROM tips\n WHERE tipper=%s\n\n \"\"\"\n\n with gittip.db.get_transaction() as txn:\n\n # Load the existing connection.\n # =============================\n # Every account elsewhere has at least a stub participant account\n # on Gittip.\n\n txn.execute(\"\"\"\n\n SELECT participant\n , claimed_time IS NULL AS is_stub\n FROM elsewhere\n JOIN participants ON participant=participants.username\n WHERE elsewhere.platform=%s AND elsewhere.user_id=%s\n\n \"\"\", (platform, user_id))\n rec = txn.fetchone()\n assert rec is not None # sanity check\n\n other_username = rec['participant']\n\n\n # Make sure we have user confirmation if needed.\n # ==============================================\n # We need confirmation in whatever combination of the following\n # three cases:\n #\n # - the other participant is not a stub; we are taking the\n # account elsewhere away from another viable Gittip\n # participant\n #\n # - the other participant has no other accounts elsewhere; taking\n # away the account elsewhere will leave the other Gittip\n # participant without any means of logging in, and it will be\n # archived and its tips absorbed by us\n #\n # - we already have an account elsewhere connected from the given\n # platform, and it will be handed off to a new stub\n # participant\n\n # other_is_a_real_participant\n other_is_a_real_participant = not rec['is_stub']\n\n # this_is_others_last_account_elsewhere\n txn.execute( \"SELECT count(*) AS nelsewhere FROM elsewhere \"\n \"WHERE participant=%s\"\n , (other_username,)\n )\n nelsewhere = txn.fetchone()['nelsewhere']\n assert nelsewhere > 0 # sanity check\n this_is_others_last_account_elsewhere = nelsewhere == 1\n\n # 
we_already_have_that_kind_of_account\n txn.execute( \"SELECT count(*) AS nparticipants FROM elsewhere \"\n \"WHERE participant=%s AND platform=%s\"\n , (self.username, platform)\n )\n nparticipants = txn.fetchone()['nparticipants']\n assert nparticipants in (0, 1) # sanity check\n we_already_have_that_kind_of_account = nparticipants == 1\n\n need_confirmation = NeedConfirmation( other_is_a_real_participant\n , this_is_others_last_account_elsewhere\n , we_already_have_that_kind_of_account\n )\n if need_confirmation and not have_confirmation:\n raise need_confirmation\n\n\n # We have user confirmation. Proceed.\n # ===================================\n # There is a race condition here. The last person to call this will\n # win. XXX: I'm not sure what will happen to the DB and UI for the\n # loser.\n\n\n # Move any old account out of the way.\n # ====================================\n\n if we_already_have_that_kind_of_account:\n new_stub_username = reserve_a_random_username(txn)\n txn.execute( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND participant=%s\"\n , (new_stub_username, platform, self.username)\n )\n\n\n # Do the deal.\n # ============\n # If other_is_not_a_stub, then other will have the account\n # elsewhere taken away from them with this call. If there are other\n # browsing sessions open from that account, they will stay open\n # until they expire (XXX Is that okay?)\n\n txn.execute( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND user_id=%s\"\n , (self.username, platform, user_id)\n )\n\n\n # Fold the old participant into the new as appropriate.\n # =====================================================\n # We want to do this whether or not other is a stub participant.\n\n if this_is_others_last_account_elsewhere:\n\n # Take over tips.\n # ===============\n\n x, y = self.username, other_username\n txn.execute(CONSOLIDATE_TIPS_RECEIVING, (x, x,y, x,y, x))\n txn.execute(CONSOLIDATE_TIPS_GIVING, (x, x,y, x,y, x))\n txn.execute(ZERO_OUT_OLD_TIPS_RECEIVING, (other_username,))\n txn.execute(ZERO_OUT_OLD_TIPS_GIVING, (other_username,))\n\n\n # Archive the old participant.\n # ============================\n # We always give them a new, random username. We sign out\n # the old participant.\n\n for archive_username in gen_random_usernames():\n try:\n txn.execute(\"\"\"\n\n UPDATE participants\n SET username=%s\n , username_lower=%s\n , session_token=NULL\n , session_expires=now()\n WHERE username=%s\n RETURNING username\n\n \"\"\", ( archive_username\n , archive_username.lower()\n , other_username)\n )\n rec = txn.fetchone()\n except IntegrityError:\n continue # archive_username is already taken;\n # extremely unlikely, but ...\n # XXX But can the UPDATE fail in other ways?\n else:\n assert rec is not None # sanity checks\n assert rec['username'] == archive_username\n break\n\n\n # Record the absorption.\n # ======================\n # This is for preservation of history.\n\n txn.execute( \"INSERT INTO absorptions \"\n \"(absorbed_was, absorbed_by, archived_as) \"\n \"VALUES (%s, %s, %s)\"\n , (other_username, self.username, archive_username)\n )\n\n\n # Lastly, keep account_elsewhere in sync.\n # =======================================\n # Bandaid for\n #\n # https://github.com/gittip/www.gittip.com/issues/421\n #\n # XXX This is why we're porting to SQLAlchemy:\n #\n # https://github.com/gittip/www.gittip.com/issues/129\n\n account_elsewhere.participant = self.username\n", "path": "gittip/participant.py" } ]
[ { "content": "\"\"\"Defines a Participant class.\n\"\"\"\nimport random\nimport re\nimport uuid\nfrom decimal import Decimal\n\nimport gittip\nfrom aspen import Response\nfrom aspen.utils import typecheck\nfrom psycopg2 import IntegrityError\n\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \".,-_;:@ \")\n\n\nclass NoParticipantId(Exception):\n \"\"\"Represent a bug where we treat an anonymous user as a participant.\n \"\"\"\n\n\nclass NeedConfirmation(Exception):\n \"\"\"We need confirmation before we'll proceed.\n \"\"\"\n\n def __init__(self, a, b, c):\n self.other_is_a_real_participant = a\n self.this_is_others_last_account_elsewhere = b\n self.we_already_have_that_kind_of_account = c\n self._all = (a, b, c)\n\n def __repr__(self):\n return \"<NeedConfirmation: %r %r %r>\" % self._all\n __str__ = __repr__\n\n def __eq__(self, other):\n return self._all == other._all\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __nonzero__(self):\n # bool(need_confirmation)\n A, B, C = self._all\n return A or C\n\n\ndef gen_random_usernames():\n \"\"\"Yield up to 100 random usernames.\n \"\"\"\n seatbelt = 0\n while 1:\n yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')\n seatbelt += 1\n if seatbelt > 100:\n raise StopIteration\n\n\ndef reserve_a_random_username(db=None):\n \"\"\"Reserve and a random username.\n\n The returned value is guaranteed to have been reserved in the database.\n\n \"\"\"\n if db is None: # During take_over we want to use our own transaction.\n db = gittip.db\n\n for username in gen_random_usernames():\n try:\n db.execute( \"INSERT INTO participants (username, username_lower) \"\n \"VALUES (%s, %s)\"\n , (username, username.lower())\n )\n except IntegrityError: # Collision, try again with another value.\n pass\n else:\n break\n\n return username\n\n\ndef require_username(func):\n # XXX This should be done with a metaclass, maybe?\n def wrapped(self, *a, **kw):\n if self.username is None:\n raise NoParticipantId(\"User does not participate, apparently.\")\n return func(self, *a, **kw)\n return wrapped\n\n\nclass Participant(object):\n \"\"\"Represent a Gittip participant.\n \"\"\"\n\n class NoSelfTipping(Exception): pass\n class BadAmount(Exception): pass\n\n\n def __init__(self, username):\n typecheck(username, (unicode, None))\n self.username = username\n\n\n @require_username\n def get_details(self):\n \"\"\"Return a dictionary.\n \"\"\"\n SELECT = \"\"\"\n\n SELECT *\n FROM participants\n WHERE username = %s\n\n \"\"\"\n return gittip.db.fetchone(SELECT, (self.username,))\n\n\n # API Key\n # =======\n\n @require_username\n def recreate_api_key(self):\n api_key = str(uuid.uuid4())\n SQL = \"UPDATE participants SET api_key=%s WHERE username=%s\"\n gittip.db.execute(SQL, (api_key, self.username))\n return api_key\n\n\n # Claiming\n # ========\n # An unclaimed Participant is a stub that's created when someone pledges to\n # give to an AccountElsewhere that's not been connected on Gittip yet.\n\n @require_username\n def resolve_unclaimed(self):\n \"\"\"Given a username, return an URL path.\n \"\"\"\n rec = gittip.db.fetchone(\"SELECT platform, user_info FROM elsewhere \"\n \"WHERE participant = %s\", (self.username,))\n if rec is None:\n out = None\n elif rec['platform'] == 'github':\n out = '/on/github/%s/' % rec['user_info']['login']\n else:\n assert rec['platform'] == 'twitter'\n out = '/on/twitter/%s/' % rec['user_info']['screen_name']\n return out\n\n 
@require_username\n def set_as_claimed(self):\n CLAIM = \"\"\"\\\n\n UPDATE participants\n SET claimed_time=CURRENT_TIMESTAMP\n WHERE username=%s\n AND claimed_time IS NULL\n\n \"\"\"\n gittip.db.execute(CLAIM, (self.username,))\n\n @require_username\n def insert_into_communities(self, is_member, name, slug):\n username = self.username\n gittip.db.execute(\"\"\"\n\n INSERT INTO communities\n (ctime, name, slug, participant, is_member)\n VALUES ( COALESCE (( SELECT ctime\n FROM communities\n WHERE (participant=%s AND slug=%s)\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %s, %s, %s, %s\n )\n RETURNING ( SELECT count(*) = 0\n FROM communities\n WHERE participant=%s\n )\n AS first_time_community\n\n \"\"\", (username, slug, name, slug, username, is_member, username))\n\n @require_username\n def change_username(self, suggested):\n \"\"\"Raise Response or return None.\n\n We want to be pretty loose with usernames. Unicode is allowed--XXX\n aspen bug :(. So are spaces.Control characters aren't. We also limit to\n 32 characters in length.\n\n \"\"\"\n for i, c in enumerate(suggested):\n if i == 32:\n raise Response(413) # Request Entity Too Large (more or less)\n elif ord(c) < 128 and c not in ASCII_ALLOWED_IN_USERNAME:\n raise Response(400) # Yeah, no.\n elif c not in ASCII_ALLOWED_IN_USERNAME:\n raise Response(400) # XXX Burned by an Aspen bug. :`-(\n # https://github.com/whit537/aspen/issues/102\n\n if suggested in gittip.RESTRICTED_USERNAMES:\n raise Response(400)\n\n if suggested != self.username:\n # Will raise IntegrityError if the desired username is taken.\n rec = gittip.db.fetchone(\"UPDATE participants \"\n \"SET username=%s WHERE username=%s \"\n \"RETURNING username\",\n (suggested, self.username))\n\n assert rec is not None # sanity check\n assert suggested == rec['username'] # sanity check\n self.username = suggested\n\n\n @require_username\n def get_accounts_elsewhere(self):\n \"\"\"Return a two-tuple of elsewhere dicts.\n \"\"\"\n ACCOUNTS = \"\"\"\n SELECT * FROM elsewhere WHERE participant=%s;\n \"\"\"\n accounts = gittip.db.fetchall(ACCOUNTS, (self.username,))\n assert accounts is not None\n twitter_account = None\n github_account = None\n for account in accounts:\n if account['platform'] == 'github':\n github_account = account\n else:\n assert account['platform'] == 'twitter', account['platform']\n twitter_account = account\n return (github_account, twitter_account)\n\n\n @require_username\n def set_tip_to(self, tippee, amount):\n \"\"\"Given participant id and amount as str, return a tuple.\n\n We INSERT instead of UPDATE, so that we have history to explore. The\n COALESCE function returns the first of its arguments that is not NULL.\n The effect here is to stamp all tips with the timestamp of the first\n tip from this user to that. 
I believe this is used to determine the\n order of transfers during payday.\n\n The tuple returned is the amount as a Decimal and a boolean indicating\n whether this is the first time this tipper has tipped (we want to track\n that as part of our conversion funnel).\n\n \"\"\"\n\n if self.username == tippee:\n raise self.NoSelfTipping\n\n amount = Decimal(amount) # May raise InvalidOperation\n hi = gittip.AMOUNTS[0]\n lo = gittip.AMOUNTS[-1]\n if (amount < lo) or (amount > hi):\n raise self.BadAmount\n\n NEW_TIP = \"\"\"\\\n\n INSERT INTO tips\n (ctime, tipper, tippee, amount)\n VALUES ( COALESCE (( SELECT ctime\n FROM tips\n WHERE (tipper=%s AND tippee=%s)\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %s, %s, %s\n )\n RETURNING ( SELECT count(*) = 0 FROM tips WHERE tipper=%s )\n AS first_time_tipper\n\n \"\"\"\n args = (self.username, tippee, self.username, tippee, amount, \\\n self.username)\n first_time_tipper = \\\n gittip.db.fetchone(NEW_TIP, args)['first_time_tipper']\n return amount, first_time_tipper\n\n\n @require_username\n def get_tip_to(self, tippee):\n \"\"\"Given two user ids, return a Decimal.\n \"\"\"\n TIP = \"\"\"\\\n\n SELECT amount\n FROM tips\n WHERE tipper=%s\n AND tippee=%s\n ORDER BY mtime DESC\n LIMIT 1\n\n \"\"\"\n rec = gittip.db.fetchone(TIP, (self.username, tippee))\n if rec is None:\n tip = Decimal('0.00')\n else:\n tip = rec['amount']\n return tip\n\n\n @require_username\n def get_dollars_receiving(self):\n \"\"\"Return a Decimal.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT sum(amount) AS dollars_receiving\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.username = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n AND is_suspicious IS NOT true\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n\n \"\"\"\n rec = gittip.db.fetchone(BACKED, (self.username,))\n if rec is None:\n amount = None\n else:\n amount = rec['dollars_receiving'] # might be None\n\n if amount is None:\n amount = Decimal('0.00')\n\n return amount\n\n\n @require_username\n def get_dollars_giving(self):\n \"\"\"Return a Decimal.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT sum(amount) AS dollars_giving\n FROM ( SELECT DISTINCT ON (tippee)\n amount\n , tippee\n FROM tips\n JOIN participants p ON p.username = tippee\n WHERE tipper=%s\n AND is_suspicious IS NOT true\n AND claimed_time IS NOT NULL\n ORDER BY tippee\n , mtime DESC\n ) AS foo\n\n \"\"\"\n rec = gittip.db.fetchone(BACKED, (self.username,))\n if rec is None:\n amount = None\n else:\n amount = rec['dollars_giving'] # might be None\n\n if amount is None:\n amount = Decimal('0.00')\n\n return amount\n\n\n @require_username\n def get_number_of_backers(self):\n \"\"\"Given a unicode, return an int.\n \"\"\"\n\n BACKED = \"\"\"\\\n\n SELECT count(amount) AS nbackers\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.username = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n AND is_suspicious IS NOT true\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n WHERE amount > 0\n\n \"\"\"\n rec = gittip.db.fetchone(BACKED, (self.username,))\n if rec is None:\n nbackers = None\n else:\n nbackers = rec['nbackers'] # might be None\n\n if nbackers is None:\n nbackers = 0\n\n return nbackers\n\n\n @require_username\n def get_tip_distribution(self):\n SQL = \"\"\"\n\n SELECT amount\n , count(amount) AS ncontributing\n FROM ( SELECT DISTINCT ON (tipper)\n amount\n , tipper\n FROM tips\n JOIN participants p ON p.username = tipper\n WHERE tippee=%s\n AND last_bill_result = ''\n 
AND is_suspicious IS NOT true\n ORDER BY tipper\n , mtime DESC\n ) AS foo\n WHERE amount > 0\n GROUP BY amount\n ORDER BY amount\n\n \"\"\"\n npatrons = 0.0 # float to trigger float division\n contributed = Decimal('0.00')\n other = [-1, 0, 0] # accumulates old tip amounts\n out = []\n for rec in gittip.db.fetchall(SQL, (self.username,)):\n if rec['amount'] not in gittip.AMOUNTS:\n other[1] += rec['ncontributing']\n other[2] += rec['amount'] * rec['ncontributing']\n contributed += rec['amount'] * rec['ncontributing']\n else:\n out.append([ rec['amount']\n , rec['ncontributing']\n , rec['amount'] * rec['ncontributing']\n ])\n contributed += out[-1][2]\n npatrons += rec['ncontributing']\n if other != [-1, 0, 0]:\n out.append(other)\n for row in out:\n row.append((row[1] / npatrons) if npatrons > 0 else 0)\n row.append((row[2] / contributed) if contributed > 0 else 0)\n return out, npatrons, contributed\n\n\n @require_username\n def get_giving_for_profile(self, db=None):\n \"\"\"Given a participant id and a date, return a list and a Decimal.\n\n This function is used to populate a participant's page for their own\n viewing pleasure.\n\n A half-injected dependency, that's what db is.\n\n \"\"\"\n if db is None:\n from gittip import db\n\n TIPS = \"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , p.claimed_time\n , p.username_lower\n FROM tips t\n JOIN participants p ON p.username = t.tippee\n WHERE tipper = %s\n AND p.is_suspicious IS NOT true\n AND p.claimed_time IS NOT NULL\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY amount DESC\n , username_lower\n\n \"\"\"\n tips = list(db.fetchall(TIPS, (self.username,)))\n\n UNCLAIMED_TIPS = \"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , p.claimed_time\n , e.platform\n , e.user_info\n FROM tips t\n JOIN participants p ON p.username = t.tippee\n JOIN elsewhere e ON e.participant = t.tippee\n WHERE tipper = %s\n AND p.is_suspicious IS NOT true\n AND p.claimed_time IS NULL\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY amount DESC\n , lower(user_info->'screen_name')\n , lower(user_info->'username')\n , lower(user_info->'login')\n\n \"\"\"\n unclaimed_tips = list(db.fetchall(UNCLAIMED_TIPS, (self.username,)))\n\n\n # Compute the total.\n # ==================\n # For payday we only want to process payments to tippees who have\n # themselves opted into Gittip. For the tipper's profile page we want\n # to show the total amount they've pledged (so they're not surprised\n # when someone *does* start accepting tips and all of a sudden they're\n # hit with bigger charges.\n\n total = sum([t['amount'] for t in tips])\n if not total:\n # If tips is an empty list, total is int 0. We want a Decimal.\n total = Decimal('0.00')\n\n unclaimed_total = sum([t['amount'] for t in unclaimed_tips])\n if not unclaimed_total:\n unclaimed_total = Decimal('0.00')\n\n return tips, total, unclaimed_tips, unclaimed_total\n\n\n @require_username\n def get_tips_and_total(self, for_payday=False, db=None):\n \"\"\"Given a participant id and a date, return a list and a Decimal.\n\n This function is used by the payday function. If for_payday is not\n False it must be a date object. Originally we also used this function\n to populate the profile page, but our requirements there changed while,\n oddly, our requirements in payday *also* changed to match the old\n requirements of the profile page. 
So this function keeps the for_payday\n parameter after all.\n\n A half-injected dependency, that's what db is.\n\n \"\"\"\n if db is None:\n from gittip import db\n\n if for_payday:\n\n # For payday we want the oldest relationship to be paid first.\n order_by = \"ctime ASC\"\n\n\n # This is where it gets crash-proof.\n # ==================================\n # We need to account for the fact that we may have crashed during\n # Payday and we're re-running that function. We only want to select\n # tips that existed before Payday started, but haven't been\n # processed as part of this Payday yet.\n #\n # It's a bug if the paydays subselect returns > 1 rows.\n #\n # XXX If we crash during Payday and we rerun it after a timezone\n # change, will we get burned? How?\n\n ts_filter = \"\"\"\\\n\n AND mtime < %s\n AND ( SELECT id\n FROM transfers\n WHERE tipper=t.tipper\n AND tippee=t.tippee\n AND timestamp >= %s\n ) IS NULL\n\n \"\"\"\n args = (self.username, for_payday, for_payday)\n else:\n order_by = \"amount DESC\"\n ts_filter = \"\"\n args = (self.username,)\n\n TIPS = \"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (tippee)\n amount\n , tippee\n , t.ctime\n , p.claimed_time\n FROM tips t\n JOIN participants p ON p.username = t.tippee\n WHERE tipper = %%s\n AND p.is_suspicious IS NOT true\n %s\n ORDER BY tippee\n , t.mtime DESC\n ) AS foo\n ORDER BY %s\n , tippee\n\n \"\"\" % (ts_filter, order_by) # XXX, No injections here, right?!\n tips = list(db.fetchall(TIPS, args))\n\n\n # Compute the total.\n # ==================\n # For payday we only want to process payments to tippees who have\n # themselves opted into Gittip. For the tipper's profile page we want\n # to show the total amount they've pledged (so they're not surprised\n # when someone *does* start accepting tips and all of a sudden they're\n # hit with bigger charges.\n\n if for_payday:\n to_total = [t for t in tips if t['claimed_time'] is not None]\n else:\n to_total = tips\n total = sum([t['amount'] for t in to_total])\n\n if not total:\n # If to_total is an empty list, total is int 0. We want a Decimal.\n total = Decimal('0.00')\n\n return tips, total\n\n\n\n # Accounts Elsewhere\n # ==================\n\n @require_username\n def take_over(self, account_elsewhere, have_confirmation=False):\n \"\"\"Given two unicodes, raise WontProceed or return None.\n\n This method associates an account on another platform (GitHub, Twitter,\n etc.) with the Gittip participant represented by self. Every account\n elsewhere has an associated Gittip participant account, even if its\n only a stub participant (it allows us to track pledges to that account\n should they ever decide to join Gittip).\n\n In certain circumstances, we want to present the user with a\n confirmation before proceeding to reconnect the account elsewhere to\n the new Gittip account; NeedConfirmation is the signal to request\n confirmation. If it was the last account elsewhere connected to the old\n Gittip account, then we absorb the old Gittip account into the new one,\n effectively archiving the old account.\n\n Here's what absorbing means:\n\n - consolidated tips to and fro are set up for the new participant\n\n Amounts are summed, so if alice tips bob $1 and carl $1, and\n then bob absorbs carl, then alice tips bob $2(!) and carl $0.\n\n And if bob tips alice $1 and carl tips alice $1, and then bob\n absorbs carl, then bob tips alice $2(!) 
and carl tips alice $0.\n\n The ctime of each new consolidated tip is the older of the two\n tips that are being consolidated.\n\n If alice tips bob $1, and alice absorbs bob, then alice tips\n bob $0.\n\n If alice tips bob $1, and bob absorbs alice, then alice tips\n bob $0.\n\n - all tips to and from the other participant are set to zero\n - the absorbed username is released for reuse\n - the absorption is recorded in an absorptions table\n\n This is done in one transaction.\n\n \"\"\"\n platform = account_elsewhere.platform\n user_id = account_elsewhere.user_id\n\n typecheck(platform, unicode, user_id, unicode, have_confirmation, bool)\n\n CONSOLIDATE_TIPS_RECEIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), tipper, %s AS tippee, sum(amount)\n FROM ( SELECT DISTINCT ON (tipper, tippee)\n ctime, tipper, tippee, amount\n FROM tips\n ORDER BY tipper, tippee, mtime DESC\n ) AS unique_tips\n WHERE (tippee=%s OR tippee=%s)\n AND NOT (tipper=%s AND tippee=%s)\n AND NOT (tipper=%s)\n GROUP BY tipper\n\n \"\"\"\n\n CONSOLIDATE_TIPS_GIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), %s AS tipper, tippee, sum(amount)\n FROM ( SELECT DISTINCT ON (tipper, tippee)\n ctime, tipper, tippee, amount\n FROM tips\n ORDER BY tipper, tippee, mtime DESC\n ) AS unique_tips\n WHERE (tipper=%s OR tipper=%s)\n AND NOT (tipper=%s AND tippee=%s)\n AND NOT (tippee=%s)\n GROUP BY tippee\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_RECEIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT DISTINCT ON (tipper) ctime, tipper, tippee, 0 AS amount\n FROM tips\n WHERE tippee=%s\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_GIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT DISTINCT ON (tippee) ctime, tipper, tippee, 0 AS amount\n FROM tips\n WHERE tipper=%s\n\n \"\"\"\n\n with gittip.db.get_transaction() as txn:\n\n # Load the existing connection.\n # =============================\n # Every account elsewhere has at least a stub participant account\n # on Gittip.\n\n txn.execute(\"\"\"\n\n SELECT participant\n , claimed_time IS NULL AS is_stub\n FROM elsewhere\n JOIN participants ON participant=participants.username\n WHERE elsewhere.platform=%s AND elsewhere.user_id=%s\n\n \"\"\", (platform, user_id))\n rec = txn.fetchone()\n assert rec is not None # sanity check\n\n other_username = rec['participant']\n\n\n # Make sure we have user confirmation if needed.\n # ==============================================\n # We need confirmation in whatever combination of the following\n # three cases:\n #\n # - the other participant is not a stub; we are taking the\n # account elsewhere away from another viable Gittip\n # participant\n #\n # - the other participant has no other accounts elsewhere; taking\n # away the account elsewhere will leave the other Gittip\n # participant without any means of logging in, and it will be\n # archived and its tips absorbed by us\n #\n # - we already have an account elsewhere connected from the given\n # platform, and it will be handed off to a new stub\n # participant\n\n # other_is_a_real_participant\n other_is_a_real_participant = not rec['is_stub']\n\n # this_is_others_last_account_elsewhere\n txn.execute( \"SELECT count(*) AS nelsewhere FROM elsewhere \"\n \"WHERE participant=%s\"\n , (other_username,)\n )\n nelsewhere = txn.fetchone()['nelsewhere']\n assert nelsewhere > 0 # sanity check\n this_is_others_last_account_elsewhere = nelsewhere == 1\n\n # 
we_already_have_that_kind_of_account\n txn.execute( \"SELECT count(*) AS nparticipants FROM elsewhere \"\n \"WHERE participant=%s AND platform=%s\"\n , (self.username, platform)\n )\n nparticipants = txn.fetchone()['nparticipants']\n assert nparticipants in (0, 1) # sanity check\n we_already_have_that_kind_of_account = nparticipants == 1\n\n need_confirmation = NeedConfirmation( other_is_a_real_participant\n , this_is_others_last_account_elsewhere\n , we_already_have_that_kind_of_account\n )\n if need_confirmation and not have_confirmation:\n raise need_confirmation\n\n\n # We have user confirmation. Proceed.\n # ===================================\n # There is a race condition here. The last person to call this will\n # win. XXX: I'm not sure what will happen to the DB and UI for the\n # loser.\n\n\n # Move any old account out of the way.\n # ====================================\n\n if we_already_have_that_kind_of_account:\n new_stub_username = reserve_a_random_username(txn)\n txn.execute( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND participant=%s\"\n , (new_stub_username, platform, self.username)\n )\n\n\n # Do the deal.\n # ============\n # If other_is_not_a_stub, then other will have the account\n # elsewhere taken away from them with this call. If there are other\n # browsing sessions open from that account, they will stay open\n # until they expire (XXX Is that okay?)\n\n txn.execute( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND user_id=%s\"\n , (self.username, platform, user_id)\n )\n\n\n # Fold the old participant into the new as appropriate.\n # =====================================================\n # We want to do this whether or not other is a stub participant.\n\n if this_is_others_last_account_elsewhere:\n\n # Take over tips.\n # ===============\n\n x, y = self.username, other_username\n txn.execute(CONSOLIDATE_TIPS_RECEIVING, (x, x,y, x,y, x))\n txn.execute(CONSOLIDATE_TIPS_GIVING, (x, x,y, x,y, x))\n txn.execute(ZERO_OUT_OLD_TIPS_RECEIVING, (other_username,))\n txn.execute(ZERO_OUT_OLD_TIPS_GIVING, (other_username,))\n\n\n # Archive the old participant.\n # ============================\n # We always give them a new, random username. We sign out\n # the old participant.\n\n for archive_username in gen_random_usernames():\n try:\n txn.execute(\"\"\"\n\n UPDATE participants\n SET username=%s\n , username_lower=%s\n , session_token=NULL\n , session_expires=now()\n WHERE username=%s\n RETURNING username\n\n \"\"\", ( archive_username\n , archive_username.lower()\n , other_username)\n )\n rec = txn.fetchone()\n except IntegrityError:\n continue # archive_username is already taken;\n # extremely unlikely, but ...\n # XXX But can the UPDATE fail in other ways?\n else:\n assert rec is not None # sanity checks\n assert rec['username'] == archive_username\n break\n\n\n # Record the absorption.\n # ======================\n # This is for preservation of history.\n\n txn.execute( \"INSERT INTO absorptions \"\n \"(absorbed_was, absorbed_by, archived_as) \"\n \"VALUES (%s, %s, %s)\"\n , (other_username, self.username, archive_username)\n )\n\n\n # Lastly, keep account_elsewhere in sync.\n # =======================================\n # Bandaid for\n #\n # https://github.com/gittip/www.gittip.com/issues/421\n #\n # XXX This is why we're porting to SQLAlchemy:\n #\n # https://github.com/gittip/www.gittip.com/issues/129\n\n account_elsewhere.participant = self.username\n", "path": "gittip/participant.py" } ]
diff --git a/gittip/participant.py b/gittip/participant.py index 0b39b4e88b..f10b246f7c 100644 --- a/gittip/participant.py +++ b/gittip/participant.py @@ -9,7 +9,6 @@ from aspen import Response from aspen.utils import typecheck from psycopg2 import IntegrityError -from gittip.models import community ASCII_ALLOWED_IN_USERNAME = set("0123456789"
optuna__optuna-5306
Feature to determine whether `GridSampler` has exhausted the grid

### Motivation

Currently, there is no good way to determine whether the grid of `GridSampler` is exhausted when resuming a study. While the method `_get_unvisited_grid_ids()` exists, it is a protected method and should not be used outside of the library.

### Description

I suggest adding a public method called something like `is_exhausted()` that simply returns `len(self._get_unvisited_grid_ids()) == 0`.

### Alternatives (optional)

Alternatively, it may be useful to add keyword arguments to `GridSampler` that give more control over what happens if the grid is already exhausted. For example, one could add a keyword like `reevaluate_when_exhausted: bool`.

### Additional context (optional)

I arrived at this suggestion after receiving the following warning:

> `GridSampler` is re-evaluating a configuration because the grid has been exhausted. This may happen due to a timing issue during distributed optimization or when re-running optimizations on already finished studies.

This issue builds further on the discussion in https://github.com/optuna/optuna/issues/3256
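To make the proposal concrete, here is a minimal usage sketch for resuming a persistent study; the study name, storage URL, and objective are illustrative, and the signature of `is_exhausted()` (taking the study as an argument) follows the patch in this record rather than a finalized upstream API.

```python
import optuna


def objective(trial):
    x = trial.suggest_float("x", -100, 100)
    y = trial.suggest_int("y", -100, 100)
    return x**2 + y**2


search_space = {"x": [-50, 0, 50], "y": [-99, 0, 99]}
sampler = optuna.samplers.GridSampler(search_space)

# Resume (or create) a persistent study; the name and storage URL are illustrative.
study = optuna.create_study(
    study_name="grid-demo",
    storage="sqlite:///grid.db",
    load_if_exists=True,
    sampler=sampler,
)

# Only optimize if some grid points still lack a finished trial, instead of
# re-evaluating an already-visited configuration on an exhausted grid.
if not sampler.is_exhausted(study):
    study.optimize(objective)
```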
[ { "content": "import itertools\nfrom numbers import Real\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\nimport warnings\n\nimport numpy as np\n\nfrom optuna.distributions import BaseDistribution\nfrom optuna.logging import get_logger\nfrom optuna.samplers import BaseSampler\nfrom optuna.samplers._lazy_random_state import LazyRandomState\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\nGridValueType = Union[str, float, int, bool, None]\n\n\n_logger = get_logger(__name__)\n\n\nclass GridSampler(BaseSampler):\n \"\"\"Sampler using grid search.\n\n With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters\n in the given search space during the study.\n\n Example:\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_int(\"y\", -100, 100)\n return x**2 + y**2\n\n\n search_space = {\"x\": [-50, 0, 50], \"y\": [-99, 0, 99]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective)\n\n Note:\n\n This sampler with :ref:`ask_and_tell` raises :exc:`RuntimeError` just after evaluating\n the final grid. This is because :class:`~optuna.samplers.GridSampler` automatically\n stops the optimization if all combinations in the passed ``search_space`` have already\n been evaluated, internally invoking the :func:`~optuna.study.Study.stop` method.\n As a workaround, we need to handle the error manually as in\n https://github.com/optuna/optuna/issues/4121#issuecomment-1305289910.\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization\n specified by discrete suggest methods but just samples one of values specified in the\n search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is\n sampled as ``x`` instead of an integer point.\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n # The following suggest method specifies integer points between -5 and 5.\n x = trial.suggest_float(\"x\", -5, 5, step=1)\n return x**2\n\n\n # Non-int points are specified in the grid.\n search_space = {\"x\": [-0.5, 0.5]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective, n_trials=2)\n\n Note:\n A parameter configuration in the grid is not considered finished until its trial is\n finished. Therefore, during distributed optimization where trials run concurrently,\n different workers will occasionally suggest the same parameter configuration.\n The total number of actual trials may therefore exceed the size of the grid.\n\n Note:\n All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with\n :meth:`~optuna.study.Study.enqueue_trial`.\n\n Args:\n search_space:\n A dictionary whose key and value are a parameter name and the corresponding candidates\n of values, respectively.\n seed:\n A seed to fix the order of trials as the grid is randomly shuffled. 
Please note that\n it is not recommended using this option in distributed optimization settings since\n this option cannot ensure the order of trials and may increase the number of duplicate\n suggestions during distributed optimization.\n \"\"\"\n\n def __init__(\n self, search_space: Mapping[str, Sequence[GridValueType]], seed: Optional[int] = None\n ) -> None:\n for param_name, param_values in search_space.items():\n for value in param_values:\n self._check_value(param_name, value)\n\n self._search_space = {}\n for param_name, param_values in sorted(search_space.items()):\n self._search_space[param_name] = list(param_values)\n\n self._all_grids = list(itertools.product(*self._search_space.values()))\n self._param_names = sorted(search_space.keys())\n self._n_min_trials = len(self._all_grids)\n self._rng = LazyRandomState(seed)\n self._rng.rng.shuffle(self._all_grids)\n\n def reseed_rng(self) -> None:\n self._rng.rng.seed()\n\n def before_trial(self, study: Study, trial: FrozenTrial) -> None:\n # Instead of returning param values, GridSampler puts the target grid id as a system attr,\n # and the values are returned from `sample_independent`. This is because the distribution\n # object is hard to get at the beginning of trial, while we need the access to the object\n # to validate the sampled value.\n\n # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not\n # assign a new grid_id.\n if \"grid_id\" in trial.system_attrs or \"fixed_params\" in trial.system_attrs:\n return\n\n if 0 <= trial.number and trial.number < self._n_min_trials:\n study._storage.set_trial_system_attr(\n trial._trial_id, \"search_space\", self._search_space\n )\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", trial.number)\n return\n\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n # This case may occur with distributed optimization or trial queue. If there is no\n # target grid, `GridSampler` evaluates a visited, duplicated point with the current\n # trial. After that, the optimization stops.\n\n _logger.warning(\n \"`GridSampler` is re-evaluating a configuration because the grid has been \"\n \"exhausted. 
This may happen due to a timing issue during distributed optimization \"\n \"or when re-running optimizations on already finished studies.\"\n )\n\n # One of all grids is randomly picked up in this case.\n target_grids = list(range(len(self._all_grids)))\n\n # In distributed optimization, multiple workers may simultaneously pick up the same grid.\n # To make the conflict less frequent, the grid is chosen randomly.\n grid_id = int(self._rng.rng.choice(target_grids))\n\n study._storage.set_trial_system_attr(trial._trial_id, \"search_space\", self._search_space)\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", grid_id)\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n return {}\n\n def sample_relative(\n self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n ) -> Dict[str, Any]:\n return {}\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n if \"grid_id\" not in trial.system_attrs:\n message = \"All parameters must be specified when using GridSampler with enqueue_trial.\"\n raise ValueError(message)\n\n if param_name not in self._search_space:\n message = \"The parameter name, {}, is not found in the given grid.\".format(param_name)\n raise ValueError(message)\n\n # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.\n # Current selection logic may evaluate the same parameters multiple times.\n # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.\n grid_id = trial.system_attrs[\"grid_id\"]\n param_value = self._all_grids[grid_id][self._param_names.index(param_name)]\n contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))\n if not contains:\n warnings.warn(\n f\"The value `{param_value}` is out of range of the parameter `{param_name}`. \"\n f\"The value will be used but the actual distribution is: `{param_distribution}`.\"\n )\n\n return param_value\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n study.stop()\n elif len(target_grids) == 1:\n grid_id = study._storage.get_trial_system_attrs(trial._trial_id)[\"grid_id\"]\n if grid_id == target_grids[0]:\n study.stop()\n\n @staticmethod\n def _check_value(param_name: str, param_value: Any) -> None:\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n\n message = (\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`\"\n \" or `None` for persistent storage.\".format(param_name, type(param_value))\n )\n warnings.warn(message)\n\n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n # List up unvisited grids based on already finished ones.\n visited_grids = []\n running_grids = []\n\n # We directly query the storage to get trials here instead of `study.get_trials`,\n # since some pruners such as `HyperbandPruner` use the study transformed\n # to filter trials. 
See https://github.com/optuna/optuna/issues/2327 for details.\n trials = study._storage.get_all_trials(study._study_id, deepcopy=False)\n\n for t in trials:\n if \"grid_id\" in t.system_attrs and self._same_search_space(\n t.system_attrs[\"search_space\"]\n ):\n if t.state.is_finished():\n visited_grids.append(t.system_attrs[\"grid_id\"])\n elif t.state == TrialState.RUNNING:\n running_grids.append(t.system_attrs[\"grid_id\"])\n\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)\n\n # If evaluations for all grids have been started, return grids that have not yet finished\n # because all grids should be evaluated before stopping the optimization.\n if len(unvisited_grids) == 0:\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)\n\n return list(unvisited_grids)\n\n @staticmethod\n def _grid_value_equal(value1: GridValueType, value2: GridValueType) -> bool:\n value1_is_nan = isinstance(value1, Real) and np.isnan(float(value1))\n value2_is_nan = isinstance(value2, Real) and np.isnan(float(value2))\n return (value1 == value2) or (value1_is_nan and value2_is_nan)\n\n def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:\n if set(search_space.keys()) != set(self._search_space.keys()):\n return False\n\n for param_name in search_space.keys():\n if len(search_space[param_name]) != len(self._search_space[param_name]):\n return False\n\n for i, param_value in enumerate(search_space[param_name]):\n if not self._grid_value_equal(param_value, self._search_space[param_name][i]):\n return False\n\n return True\n", "path": "optuna/samplers/_grid.py" } ]
[ { "content": "import itertools\nfrom numbers import Real\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\nimport warnings\n\nimport numpy as np\n\nfrom optuna.distributions import BaseDistribution\nfrom optuna.logging import get_logger\nfrom optuna.samplers import BaseSampler\nfrom optuna.samplers._lazy_random_state import LazyRandomState\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\nGridValueType = Union[str, float, int, bool, None]\n\n\n_logger = get_logger(__name__)\n\n\nclass GridSampler(BaseSampler):\n \"\"\"Sampler using grid search.\n\n With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters\n in the given search space during the study.\n\n Example:\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_int(\"y\", -100, 100)\n return x**2 + y**2\n\n\n search_space = {\"x\": [-50, 0, 50], \"y\": [-99, 0, 99]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective)\n\n Note:\n\n This sampler with :ref:`ask_and_tell` raises :exc:`RuntimeError` just after evaluating\n the final grid. This is because :class:`~optuna.samplers.GridSampler` automatically\n stops the optimization if all combinations in the passed ``search_space`` have already\n been evaluated, internally invoking the :func:`~optuna.study.Study.stop` method.\n As a workaround, we need to handle the error manually as in\n https://github.com/optuna/optuna/issues/4121#issuecomment-1305289910.\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization\n specified by discrete suggest methods but just samples one of values specified in the\n search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is\n sampled as ``x`` instead of an integer point.\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n # The following suggest method specifies integer points between -5 and 5.\n x = trial.suggest_float(\"x\", -5, 5, step=1)\n return x**2\n\n\n # Non-int points are specified in the grid.\n search_space = {\"x\": [-0.5, 0.5]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective, n_trials=2)\n\n Note:\n A parameter configuration in the grid is not considered finished until its trial is\n finished. Therefore, during distributed optimization where trials run concurrently,\n different workers will occasionally suggest the same parameter configuration.\n The total number of actual trials may therefore exceed the size of the grid.\n\n Note:\n All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with\n :meth:`~optuna.study.Study.enqueue_trial`.\n\n Args:\n search_space:\n A dictionary whose key and value are a parameter name and the corresponding candidates\n of values, respectively.\n seed:\n A seed to fix the order of trials as the grid is randomly shuffled. 
Please note that\n it is not recommended using this option in distributed optimization settings since\n this option cannot ensure the order of trials and may increase the number of duplicate\n suggestions during distributed optimization.\n \"\"\"\n\n def __init__(\n self, search_space: Mapping[str, Sequence[GridValueType]], seed: Optional[int] = None\n ) -> None:\n for param_name, param_values in search_space.items():\n for value in param_values:\n self._check_value(param_name, value)\n\n self._search_space = {}\n for param_name, param_values in sorted(search_space.items()):\n self._search_space[param_name] = list(param_values)\n\n self._all_grids = list(itertools.product(*self._search_space.values()))\n self._param_names = sorted(search_space.keys())\n self._n_min_trials = len(self._all_grids)\n self._rng = LazyRandomState(seed)\n self._rng.rng.shuffle(self._all_grids)\n\n def reseed_rng(self) -> None:\n self._rng.rng.seed()\n\n def before_trial(self, study: Study, trial: FrozenTrial) -> None:\n # Instead of returning param values, GridSampler puts the target grid id as a system attr,\n # and the values are returned from `sample_independent`. This is because the distribution\n # object is hard to get at the beginning of trial, while we need the access to the object\n # to validate the sampled value.\n\n # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not\n # assign a new grid_id.\n if \"grid_id\" in trial.system_attrs or \"fixed_params\" in trial.system_attrs:\n return\n\n if 0 <= trial.number and trial.number < self._n_min_trials:\n study._storage.set_trial_system_attr(\n trial._trial_id, \"search_space\", self._search_space\n )\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", trial.number)\n return\n\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n # This case may occur with distributed optimization or trial queue. If there is no\n # target grid, `GridSampler` evaluates a visited, duplicated point with the current\n # trial. After that, the optimization stops.\n\n _logger.warning(\n \"`GridSampler` is re-evaluating a configuration because the grid has been \"\n \"exhausted. 
This may happen due to a timing issue during distributed optimization \"\n \"or when re-running optimizations on already finished studies.\"\n )\n\n # One of all grids is randomly picked up in this case.\n target_grids = list(range(len(self._all_grids)))\n\n # In distributed optimization, multiple workers may simultaneously pick up the same grid.\n # To make the conflict less frequent, the grid is chosen randomly.\n grid_id = int(self._rng.rng.choice(target_grids))\n\n study._storage.set_trial_system_attr(trial._trial_id, \"search_space\", self._search_space)\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", grid_id)\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n return {}\n\n def sample_relative(\n self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n ) -> Dict[str, Any]:\n return {}\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n if \"grid_id\" not in trial.system_attrs:\n message = \"All parameters must be specified when using GridSampler with enqueue_trial.\"\n raise ValueError(message)\n\n if param_name not in self._search_space:\n message = \"The parameter name, {}, is not found in the given grid.\".format(param_name)\n raise ValueError(message)\n\n # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.\n # Current selection logic may evaluate the same parameters multiple times.\n # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.\n grid_id = trial.system_attrs[\"grid_id\"]\n param_value = self._all_grids[grid_id][self._param_names.index(param_name)]\n contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))\n if not contains:\n warnings.warn(\n f\"The value `{param_value}` is out of range of the parameter `{param_name}`. \"\n f\"The value will be used but the actual distribution is: `{param_distribution}`.\"\n )\n\n return param_value\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n study.stop()\n elif len(target_grids) == 1:\n grid_id = study._storage.get_trial_system_attrs(trial._trial_id)[\"grid_id\"]\n if grid_id == target_grids[0]:\n study.stop()\n\n @staticmethod\n def _check_value(param_name: str, param_value: Any) -> None:\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n\n message = (\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`\"\n \" or `None` for persistent storage.\".format(param_name, type(param_value))\n )\n warnings.warn(message)\n\n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n # List up unvisited grids based on already finished ones.\n visited_grids = []\n running_grids = []\n\n # We directly query the storage to get trials here instead of `study.get_trials`,\n # since some pruners such as `HyperbandPruner` use the study transformed\n # to filter trials. 
See https://github.com/optuna/optuna/issues/2327 for details.\n trials = study._storage.get_all_trials(study._study_id, deepcopy=False)\n\n for t in trials:\n if \"grid_id\" in t.system_attrs and self._same_search_space(\n t.system_attrs[\"search_space\"]\n ):\n if t.state.is_finished():\n visited_grids.append(t.system_attrs[\"grid_id\"])\n elif t.state == TrialState.RUNNING:\n running_grids.append(t.system_attrs[\"grid_id\"])\n\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)\n\n # If evaluations for all grids have been started, return grids that have not yet finished\n # because all grids should be evaluated before stopping the optimization.\n if len(unvisited_grids) == 0:\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)\n\n return list(unvisited_grids)\n\n @staticmethod\n def _grid_value_equal(value1: GridValueType, value2: GridValueType) -> bool:\n value1_is_nan = isinstance(value1, Real) and np.isnan(float(value1))\n value2_is_nan = isinstance(value2, Real) and np.isnan(float(value2))\n return (value1 == value2) or (value1_is_nan and value2_is_nan)\n\n def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:\n if set(search_space.keys()) != set(self._search_space.keys()):\n return False\n\n for param_name in search_space.keys():\n if len(search_space[param_name]) != len(self._search_space[param_name]):\n return False\n\n for i, param_value in enumerate(search_space[param_name]):\n if not self._grid_value_equal(param_value, self._search_space[param_name][i]):\n return False\n\n return True\n\n def is_exhausted(self, study: Study) -> bool:\n \"\"\"\n Return True if all the possible params are evaluated, otherwise return False.\n \"\"\"\n return len(self._get_unvisited_grid_ids(study)) == 0\n", "path": "optuna/samplers/_grid.py" } ]
diff --git a/optuna/samplers/_grid.py b/optuna/samplers/_grid.py index 2355b7b466..722ca460d4 100644 --- a/optuna/samplers/_grid.py +++ b/optuna/samplers/_grid.py @@ -277,3 +277,9 @@ def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]] return False return True + + def is_exhausted(self, study: Study) -> bool: + """ + Return True if all the possible params are evaluated, otherwise return False. + """ + return len(self._get_unvisited_grid_ids(study)) == 0 diff --git a/tests/samplers_tests/test_grid.py b/tests/samplers_tests/test_grid.py index cbdbe0dfce..deb57a7b06 100644 --- a/tests/samplers_tests/test_grid.py +++ b/tests/samplers_tests/test_grid.py @@ -254,3 +254,12 @@ def test_nan() -> None: lambda trial: 1 if np.isnan(trial.suggest_categorical("x", [0, float("nan")])) else 0 ) assert len(study.get_trials()) == 2 + + +def test_is_exhausted() -> None: + search_space = {"a": [0, 50]} + sampler = samplers.GridSampler(search_space) + study = optuna.create_study(sampler=sampler) + assert not sampler.is_exhausted(study) + study.optimize(lambda trial: trial.suggest_categorical("a", [0, 50])) + assert sampler.is_exhausted(study)
ipython__ipython-3556
_margv for macros seems to be missing
At one point in time, arguments to macros could be obtained from `_margv`, but this seems to be missing now (https://github.com/ipython/ipython/wiki/Cookbook:-Macro-arguments). I searched the entire `ipython` folder and only found `_margv` mentioned in the documentation in the macro.py file. Just wondering if this is still supported.
[ { "content": "\"\"\"Support for interactive macros in IPython\"\"\"\n\n#*****************************************************************************\n# Copyright (C) 2001-2005 Fernando Perez <[email protected]>\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#*****************************************************************************\n\nimport re\n\nfrom IPython.utils import py3compat\nfrom IPython.utils.encoding import DEFAULT_ENCODING\n\ncoding_declaration = re.compile(r\"#\\s*coding[:=]\\s*([-\\w.]+)\")\n\nclass Macro(object):\n \"\"\"Simple class to store the value of macros as strings.\n\n Macro is just a callable that executes a string of IPython\n input when called.\n \n Args to macro are available in _margv list if you need them.\n \"\"\"\n\n def __init__(self,code):\n \"\"\"store the macro value, as a single string which can be executed\"\"\"\n lines = []\n enc = None\n for line in code.splitlines():\n coding_match = coding_declaration.match(line)\n if coding_match:\n enc = coding_match.group(1)\n else:\n lines.append(line)\n code = \"\\n\".join(lines)\n if isinstance(code, bytes):\n code = code.decode(enc or DEFAULT_ENCODING)\n self.value = code + '\\n'\n \n def __str__(self):\n return py3compat.unicode_to_str(self.value)\n \n def __unicode__(self):\n return self.value\n\n def __repr__(self):\n return 'IPython.macro.Macro(%s)' % repr(self.value)\n \n def __getstate__(self):\n \"\"\" needed for safe pickling via %store \"\"\"\n return {'value': self.value}\n \n def __add__(self, other):\n if isinstance(other, Macro):\n return Macro(self.value + other.value)\n elif isinstance(other, basestring):\n return Macro(self.value + other)\n raise TypeError\n", "path": "IPython/core/macro.py" } ]
[ { "content": "\"\"\"Support for interactive macros in IPython\"\"\"\n\n#*****************************************************************************\n# Copyright (C) 2001-2005 Fernando Perez <[email protected]>\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#*****************************************************************************\n\nimport re\n\nfrom IPython.utils import py3compat\nfrom IPython.utils.encoding import DEFAULT_ENCODING\n\ncoding_declaration = re.compile(r\"#\\s*coding[:=]\\s*([-\\w.]+)\")\n\nclass Macro(object):\n \"\"\"Simple class to store the value of macros as strings.\n\n Macro is just a callable that executes a string of IPython\n input when called.\n \"\"\"\n\n def __init__(self,code):\n \"\"\"store the macro value, as a single string which can be executed\"\"\"\n lines = []\n enc = None\n for line in code.splitlines():\n coding_match = coding_declaration.match(line)\n if coding_match:\n enc = coding_match.group(1)\n else:\n lines.append(line)\n code = \"\\n\".join(lines)\n if isinstance(code, bytes):\n code = code.decode(enc or DEFAULT_ENCODING)\n self.value = code + '\\n'\n \n def __str__(self):\n return py3compat.unicode_to_str(self.value)\n \n def __unicode__(self):\n return self.value\n\n def __repr__(self):\n return 'IPython.macro.Macro(%s)' % repr(self.value)\n \n def __getstate__(self):\n \"\"\" needed for safe pickling via %store \"\"\"\n return {'value': self.value}\n \n def __add__(self, other):\n if isinstance(other, Macro):\n return Macro(self.value + other.value)\n elif isinstance(other, basestring):\n return Macro(self.value + other)\n raise TypeError\n", "path": "IPython/core/macro.py" } ]
diff --git a/IPython/core/macro.py b/IPython/core/macro.py index ca46266d358..8d567994327 100644 --- a/IPython/core/macro.py +++ b/IPython/core/macro.py @@ -19,8 +19,6 @@ class Macro(object): Macro is just a callable that executes a string of IPython input when called. - - Args to macro are available in _margv list if you need them. """ def __init__(self,code):
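For reference, a minimal sketch of how the `Macro` class in this record behaves (illustrative only; it assumes an installed IPython that exposes `IPython.core.macro.Macro` as shown above): the coding declaration is stripped from the stored value, and adding two macros produces a new macro with the concatenated source.

```python
# Illustrative sketch of the Macro behaviour shown in the record above.
from IPython.core.macro import Macro

src = "# coding: utf-8\nprint('hello')\nx = 1 + 1\n"
m = Macro(src)

# The coding declaration line is dropped; the remaining lines are re-joined
# and a trailing newline is appended.
assert m.value == "print('hello')\nx = 1 + 1\n"

# Adding two macros yields a new Macro with the concatenated source.
combined = m + Macro("y = 2\n")
assert combined.value == "print('hello')\nx = 1 + 1\ny = 2\n"
```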
cloudtools__troposphere-2238
Update DLM Interval Rule Values
Update the DLM valid intervals: `1` has been added to the allowed values, per the [DLM CreateRule documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-dlm-lifecyclepolicy-createrule.html).
[ { "content": "# Copyright (c) 2012-2022, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\n\nfrom . import tags_or_list\n\n\ndef validate_tags_or_list(x):\n \"\"\"\n Property: LifecyclePolicy.Tags\n Property: PolicyDetails.TargetTags\n Property: Schedule.TagsToAdd\n \"\"\"\n return tags_or_list(x)\n\n\ndef validate_interval(interval):\n \"\"\"\n Interval validation rule.\n Property: CreateRule.Interval\n \"\"\"\n\n VALID_INTERVALS = (2, 3, 4, 6, 8, 12, 24)\n\n if interval not in VALID_INTERVALS:\n raise ValueError(\n \"Interval must be one of : %s\"\n % \", \".join([str(i) for i in VALID_INTERVALS])\n )\n return interval\n\n\ndef validate_interval_unit(interval_unit):\n \"\"\"\n Interval unit validation rule.\n Property: CreateRule.IntervalUnit\n \"\"\"\n\n VALID_INTERVAL_UNITS = (\"HOURS\",)\n\n if interval_unit not in VALID_INTERVAL_UNITS:\n raise ValueError(\n \"Interval unit must be one of : %s\" % \", \".join(VALID_INTERVAL_UNITS)\n )\n return interval_unit\n\n\ndef validate_state(state):\n \"\"\"\n State validation rule.\n Property: LifecyclePolicy.State\n \"\"\"\n\n VALID_STATES = (\"ENABLED\", \"DISABLED\")\n\n if state not in VALID_STATES:\n raise ValueError(\"State must be one of : %s\" % \", \".join(VALID_STATES))\n return state\n", "path": "troposphere/validators/dlm.py" } ]
[ { "content": "# Copyright (c) 2012-2022, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\n\nfrom . import tags_or_list\n\n\ndef validate_tags_or_list(x):\n \"\"\"\n Property: LifecyclePolicy.Tags\n Property: PolicyDetails.TargetTags\n Property: Schedule.TagsToAdd\n \"\"\"\n return tags_or_list(x)\n\n\ndef validate_interval(interval):\n \"\"\"\n Interval validation rule.\n Property: CreateRule.Interval\n \"\"\"\n\n VALID_INTERVALS = (1, 2, 3, 4, 6, 8, 12, 24)\n\n if interval not in VALID_INTERVALS:\n raise ValueError(\n \"Interval must be one of : %s\"\n % \", \".join([str(i) for i in VALID_INTERVALS])\n )\n return interval\n\n\ndef validate_interval_unit(interval_unit):\n \"\"\"\n Interval unit validation rule.\n Property: CreateRule.IntervalUnit\n \"\"\"\n\n VALID_INTERVAL_UNITS = (\"HOURS\",)\n\n if interval_unit not in VALID_INTERVAL_UNITS:\n raise ValueError(\n \"Interval unit must be one of : %s\" % \", \".join(VALID_INTERVAL_UNITS)\n )\n return interval_unit\n\n\ndef validate_state(state):\n \"\"\"\n State validation rule.\n Property: LifecyclePolicy.State\n \"\"\"\n\n VALID_STATES = (\"ENABLED\", \"DISABLED\")\n\n if state not in VALID_STATES:\n raise ValueError(\"State must be one of : %s\" % \", \".join(VALID_STATES))\n return state\n", "path": "troposphere/validators/dlm.py" } ]
diff --git a/troposphere/validators/dlm.py b/troposphere/validators/dlm.py index 8607402d8..7104742a9 100644 --- a/troposphere/validators/dlm.py +++ b/troposphere/validators/dlm.py @@ -22,7 +22,7 @@ def validate_interval(interval): Property: CreateRule.Interval """ - VALID_INTERVALS = (2, 3, 4, 6, 8, 12, 24) + VALID_INTERVALS = (1, 2, 3, 4, 6, 8, 12, 24) if interval not in VALID_INTERVALS: raise ValueError(
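A quick sanity check of the updated validator (a sketch, assuming troposphere is installed with this change applied): `1` is now accepted, previously valid values still pass, and out-of-range values still raise.

```python
# Sketch: exercising the updated DLM interval validator.
from troposphere.validators.dlm import validate_interval, validate_interval_unit

assert validate_interval(1) == 1            # newly allowed by this change
assert validate_interval(12) == 12          # previously valid values still pass
assert validate_interval_unit("HOURS") == "HOURS"

try:
    validate_interval(5)                    # not in (1, 2, 3, 4, 6, 8, 12, 24)
except ValueError as exc:
    print(exc)  # Interval must be one of : 1, 2, 3, 4, 6, 8, 12, 24
```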
Pycord-Development__pycord-1218
Mypy can't type check pycord when namespace_packages are enabled
### Summary

Mypy errors when using pycord with the namespace_packages flag enabled

### Reproduction Steps

Run mypy against a simple pycord setup. An example set up is as follows:

```
my-repo/
├─ my_bot/
│  ├─ bot.py
.mypy.ini
```

Run mypy via: `mypy my_bot/`

Mypy config:
```ini
[mypy]
namespace_packages = True
ignore_missing_imports = True
```

### Minimal Reproducible Code

```python
`from discord import ApplicationCommand` in bot.py
```

### Expected Results

Type checking works as expected with `namespace_packages` enabled

### Actual Results

Type checking errors with:
```sh
virtual-env-path/lib/python3.9/site-packages/discord/commands/__init__.py: error: Source file found twice under different module names: "discord.commands.__init__" and "discord.commands"
Found 1 error in 1 file (errors prevented further checking)
```

### Intents

N/A

### System Information

```yaml
- Python v3.9.5-final
- py-cord v2.0.0-beta
- py-cord pkg_resources: v2.0.0b3
- aiohttp v3.8.1
- system info: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Tue Oct 12 18:33:42 PDT 2021; root:xnu-7195.141.8~1/RELEASE_X86_64
```

### Checklist

- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.

### Additional Context

Mypy won't error if `namespace_packages` is `False`, but then it cannot infer the types properly and will result in errors such as:
```sh
app/bot.py:1: error: Module "discord" has no attribute "ApplicationCommand"; maybe "ApplicationCommandMixin"?
```

This issue is also present in nextcord; however, nextcord is available under both `discord` and `nextcord`, so in `nextcord` this issue is fixed by changing the import to `from nextcord import ApplicationCommand`. Pycord doesn't expose the package as `pycord`. Any reason for this?
[ { "content": "\"\"\"\nDiscord API Wrapper\n~~~~~~~~~~~~~~~~~~~\n\nA basic wrapper for the Discord API.\n\n:copyright: (c) 2015-2021 Rapptz & (c) 2021-present Pycord Development\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\n\n__title__ = \"pycord\"\n__author__ = \"Pycord Development\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2015-2021 Rapptz & Copyright 2021-present Pycord Development\"\n__version__ = \"2.0.0b5\"\n\n__path__ = __import__(\"pkgutil\").extend_path(__path__, __name__)\n\nimport logging\nfrom typing import Literal, NamedTuple\n\nfrom . import abc, opus, sinks, ui, utils\nfrom .activity import *\nfrom .appinfo import *\nfrom .asset import *\nfrom .audit_logs import *\nfrom .bot import *\nfrom .channel import *\nfrom .client import *\nfrom .cog import Cog\nfrom .colour import *\nfrom .commands.__init__ import *\nfrom .components import *\nfrom .embeds import *\nfrom .emoji import *\nfrom .enums import *\nfrom .errors import *\nfrom .file import *\nfrom .flags import *\nfrom .guild import *\nfrom .http import *\nfrom .integrations import *\nfrom .interactions import *\nfrom .invite import *\nfrom .member import *\nfrom .mentions import *\nfrom .message import *\nfrom .object import *\nfrom .partial_emoji import *\nfrom .permissions import *\nfrom .player import *\nfrom .raw_models import *\nfrom .reaction import *\nfrom .role import *\nfrom .scheduled_events import *\nfrom .shard import *\nfrom .stage_instance import *\nfrom .sticker import *\nfrom .team import *\nfrom .template import *\nfrom .threads import *\nfrom .user import *\nfrom .voice_client import *\nfrom .webhook import *\nfrom .welcome_screen import *\nfrom .widget import *\n\n\nclass VersionInfo(NamedTuple):\n major: int\n minor: int\n micro: int\n releaselevel: Literal[\"alpha\", \"beta\", \"candidate\", \"final\"]\n serial: int\n\n\nversion_info: VersionInfo = VersionInfo(major=2, minor=0, micro=0, releaselevel=\"beta\", serial=5)\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "discord/__init__.py" } ]
[ { "content": "\"\"\"\nDiscord API Wrapper\n~~~~~~~~~~~~~~~~~~~\n\nA basic wrapper for the Discord API.\n\n:copyright: (c) 2015-2021 Rapptz & (c) 2021-present Pycord Development\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\n\n__title__ = \"pycord\"\n__author__ = \"Pycord Development\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2015-2021 Rapptz & Copyright 2021-present Pycord Development\"\n__version__ = \"2.0.0b5\"\n\n__path__ = __import__(\"pkgutil\").extend_path(__path__, __name__)\n\nimport logging\nfrom typing import Literal, NamedTuple\n\nfrom . import abc, opus, sinks, ui, utils\nfrom .activity import *\nfrom .appinfo import *\nfrom .asset import *\nfrom .audit_logs import *\nfrom .bot import *\nfrom .channel import *\nfrom .client import *\nfrom .cog import Cog\nfrom .colour import *\nfrom .commands import *\nfrom .components import *\nfrom .embeds import *\nfrom .emoji import *\nfrom .enums import *\nfrom .errors import *\nfrom .file import *\nfrom .flags import *\nfrom .guild import *\nfrom .http import *\nfrom .integrations import *\nfrom .interactions import *\nfrom .invite import *\nfrom .member import *\nfrom .mentions import *\nfrom .message import *\nfrom .object import *\nfrom .partial_emoji import *\nfrom .permissions import *\nfrom .player import *\nfrom .raw_models import *\nfrom .reaction import *\nfrom .role import *\nfrom .scheduled_events import *\nfrom .shard import *\nfrom .stage_instance import *\nfrom .sticker import *\nfrom .team import *\nfrom .template import *\nfrom .threads import *\nfrom .user import *\nfrom .voice_client import *\nfrom .webhook import *\nfrom .welcome_screen import *\nfrom .widget import *\n\n\nclass VersionInfo(NamedTuple):\n major: int\n minor: int\n micro: int\n releaselevel: Literal[\"alpha\", \"beta\", \"candidate\", \"final\"]\n serial: int\n\n\nversion_info: VersionInfo = VersionInfo(major=2, minor=0, micro=0, releaselevel=\"beta\", serial=5)\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "discord/__init__.py" } ]
diff --git a/discord/__init__.py b/discord/__init__.py index 23b819fbe8..5709c31f96 100644 --- a/discord/__init__.py +++ b/discord/__init__.py @@ -30,7 +30,7 @@ from .client import * from .cog import Cog from .colour import * -from .commands.__init__ import * +from .commands import * from .components import * from .embeds import * from .emoji import *
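One way to verify the change (a sketch; it assumes mypy and py-cord are installed, and that `bot.py` contains just the import from the issue): run mypy programmatically with namespace packages enabled. Before the fix, the `from .commands.__init__ import *` line appears to make mypy map `discord/commands/__init__.py` under two module names; after switching to `from .commands import *`, the duplicate-module error goes away.

```python
# Sketch: checking the fix with mypy's programmatic API (mypy.api.run).
from mypy import api

# bot.py contains only:  from discord import ApplicationCommand
stdout, stderr, exit_status = api.run(
    ["--namespace-packages", "--ignore-missing-imports", "bot.py"]
)
print(stdout or stderr)

# Before the fix mypy aborts with:
#   .../discord/commands/__init__.py: error: Source file found twice under
#   different module names: "discord.commands.__init__" and "discord.commands"
# After the fix the import resolves normally and type checking proceeds.
```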
pytorch__rl-530
[BUG] `inference_mode` decorator is preventing `state_dict` loading
## Describe the bug

See title

## To Reproduce

Running the example command `python sac.py` results in the following error:
```
Traceback (most recent call last):
  File "/Users/haoranpeng/Desktop/rl/examples/sac/sac.py", line 198, in main
    recorder_rm.load_state_dict(create_env_fn().state_dict())
  File "/Users/haoranpeng/Desktop/rl/torchrl/envs/transforms/transforms.py", line 418, in load_state_dict
    self.transform.load_state_dict(state_dict, **kwargs)
  File "/Users/haoranpeng/mambaforge/envs/torch_rl/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1667, in load_state_dict
    raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
RuntimeError: Error(s) in loading state_dict for Compose:
        While copying the parameter named "transforms.2.loc", whose dimensions in the model are torch.Size([17]) and whose dimensions in the checkpoint are torch.Size([17]), an exception occurred : ('Inplace update to inference tensor outside InferenceMode is not allowed.
```

## Reason and Possible fixes

Remove the `inference_mode` decorator from the function `torchrl.trainers.helpers.envs.get_stats_random_rollout()`

## Checklist

- [x] I have checked that there is no similar issue in the repo (**required**)
- [x] I have read the [documentation](https://github.com/pytorch/rl/tree/main/docs/) (**required**)
- [x] I have provided a minimal working example to reproduce the bug (**required**)
[ { "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom dataclasses import dataclass\nfrom dataclasses import field as dataclass_field\nfrom typing import Callable, Optional, Union, Any, Sequence\n\nimport torch\n\nfrom torchrl.envs import ParallelEnv\nfrom torchrl.envs.common import EnvBase\nfrom torchrl.envs.env_creator import env_creator, EnvCreator\nfrom torchrl.envs.libs.dm_control import DMControlEnv\nfrom torchrl.envs.libs.gym import GymEnv\nfrom torchrl.envs.transforms import (\n CatFrames,\n CatTensors,\n DoubleToFloat,\n FiniteTensorDictCheck,\n GrayScale,\n NoopResetEnv,\n ObservationNorm,\n Resize,\n RewardScaling,\n ToTensorImage,\n TransformedEnv,\n VecNorm,\n CenterCrop,\n)\nfrom torchrl.envs.transforms.transforms import gSDENoise, FlattenObservation\nfrom torchrl.record.recorder import VideoRecorder\nfrom torchrl.trainers.loggers import Logger\n\n__all__ = [\n \"correct_for_frame_skip\",\n \"transformed_env_constructor\",\n \"parallel_env_constructor\",\n \"get_stats_random_rollout\",\n]\n\nLIBS = {\n \"gym\": GymEnv,\n \"dm_control\": DMControlEnv,\n}\n\n\ndef correct_for_frame_skip(cfg: \"DictConfig\") -> \"DictConfig\": # noqa: F821\n \"\"\"\n Correct the arguments for the input frame_skip, by dividing all the arguments that reflect a count of frames by the\n frame_skip.\n This is aimed at avoiding unknowingly over-sampling from the environment, i.e. targetting a total number of frames\n of 1M but actually collecting frame_skip * 1M frames.\n\n Args:\n cfg (DictConfig): DictConfig containing some frame-counting argument, including:\n \"max_frames_per_traj\", \"total_frames\", \"frames_per_batch\", \"record_frames\", \"annealing_frames\",\n \"init_random_frames\", \"init_env_steps\"\n\n Returns:\n the input DictConfig, modified in-place.\n\n \"\"\"\n # Adapt all frame counts wrt frame_skip\n if cfg.frame_skip != 1:\n fields = [\n \"max_frames_per_traj\",\n \"total_frames\",\n \"frames_per_batch\",\n \"record_frames\",\n \"annealing_frames\",\n \"init_random_frames\",\n \"init_env_steps\",\n \"noops\",\n ]\n for field in fields:\n if hasattr(cfg, field):\n setattr(cfg, field, getattr(cfg, field) // cfg.frame_skip)\n return cfg\n\n\ndef make_env_transforms(\n env,\n cfg,\n video_tag,\n logger,\n env_name,\n stats,\n norm_obs_only,\n env_library,\n action_dim_gsde,\n state_dim_gsde,\n batch_dims=0,\n):\n env = TransformedEnv(env)\n\n from_pixels = cfg.from_pixels\n vecnorm = cfg.vecnorm\n norm_rewards = vecnorm and cfg.norm_rewards\n _norm_obs_only = norm_obs_only or not norm_rewards\n reward_scaling = cfg.reward_scaling\n reward_loc = cfg.reward_loc\n\n if len(video_tag):\n center_crop = cfg.center_crop\n if center_crop:\n center_crop = center_crop[0]\n env.append_transform(\n VideoRecorder(\n logger=logger,\n tag=f\"{video_tag}_{env_name}_video\",\n center_crop=center_crop,\n ),\n )\n\n if cfg.noops:\n env.append_transform(NoopResetEnv(cfg.noops))\n\n if from_pixels:\n if not cfg.catframes:\n raise RuntimeError(\n \"this env builder currently only accepts positive catframes values\"\n \"when pixels are being used.\"\n )\n env.append_transform(ToTensorImage())\n if cfg.center_crop:\n env.append_transform(CenterCrop(*cfg.center_crop))\n env.append_transform(Resize(cfg.image_size, cfg.image_size))\n if cfg.grayscale:\n env.append_transform(GrayScale())\n env.append_transform(FlattenObservation())\n 
env.append_transform(CatFrames(N=cfg.catframes, keys_in=[\"next_pixels\"]))\n if stats is None:\n obs_stats = {\"loc\": 0.0, \"scale\": 1.0}\n else:\n obs_stats = stats\n obs_stats[\"standard_normal\"] = True\n env.append_transform(ObservationNorm(**obs_stats, keys_in=[\"next_pixels\"]))\n if norm_rewards:\n reward_scaling = 1.0\n reward_loc = 0.0\n if norm_obs_only:\n reward_scaling = 1.0\n reward_loc = 0.0\n if reward_scaling is not None:\n env.append_transform(RewardScaling(reward_loc, reward_scaling))\n\n double_to_float_list = []\n double_to_float_inv_list = []\n if env_library is DMControlEnv:\n double_to_float_list += [\n \"reward\",\n ]\n double_to_float_list += [\n \"action\",\n ]\n double_to_float_inv_list += [\"action\"] # DMControl requires double-precision\n if not from_pixels:\n selected_keys = [\n key\n for key in env.observation_spec.keys()\n if (\"pixels\" not in key)\n and (key.replace(\"next_\", \"\") not in env.input_spec.keys())\n ]\n\n # even if there is a single tensor, it'll be renamed in \"next_observation_vector\"\n out_key = \"next_observation_vector\"\n env.append_transform(CatTensors(keys_in=selected_keys, out_key=out_key))\n\n if not vecnorm:\n if stats is None:\n _stats = {\"loc\": 0.0, \"scale\": 1.0}\n else:\n _stats = stats\n env.append_transform(\n ObservationNorm(**_stats, keys_in=[out_key], standard_normal=True)\n )\n else:\n env.append_transform(\n VecNorm(\n keys_in=[out_key, \"reward\"] if not _norm_obs_only else [out_key],\n decay=0.9999,\n )\n )\n\n double_to_float_list.append(out_key)\n env.append_transform(\n DoubleToFloat(\n keys_in=double_to_float_list, keys_inv_in=double_to_float_inv_list\n )\n )\n\n if hasattr(cfg, \"catframes\") and cfg.catframes:\n env.append_transform(\n CatFrames(N=cfg.catframes, keys_in=[out_key], cat_dim=-1)\n )\n\n else:\n env.append_transform(\n DoubleToFloat(\n keys_in=double_to_float_list, keys_inv_in=double_to_float_inv_list\n )\n )\n\n if hasattr(cfg, \"gSDE\") and cfg.gSDE:\n env.append_transform(\n gSDENoise(action_dim=action_dim_gsde, state_dim=state_dim_gsde)\n )\n\n env.append_transform(FiniteTensorDictCheck())\n return env\n\n\ndef transformed_env_constructor(\n cfg: \"DictConfig\", # noqa: F821\n video_tag: str = \"\",\n logger: Optional[Logger] = None,\n stats: Optional[dict] = None,\n norm_obs_only: bool = False,\n use_env_creator: bool = False,\n custom_env_maker: Optional[Callable] = None,\n custom_env: Optional[EnvBase] = None,\n return_transformed_envs: bool = True,\n action_dim_gsde: Optional[int] = None,\n state_dim_gsde: Optional[int] = None,\n batch_dims: Optional[int] = 0,\n) -> Union[Callable, EnvCreator]:\n \"\"\"\n Returns an environment creator from an argparse.Namespace built with the appropriate parser constructor.\n\n Args:\n cfg (DictConfig): a DictConfig containing the arguments of the script.\n video_tag (str, optional): video tag to be passed to the Logger object\n logger (Logger, optional): logger associated with the script\n stats (dict, optional): a dictionary containing the `loc` and `scale` for the `ObservationNorm` transform\n norm_obs_only (bool, optional): If `True` and `VecNorm` is used, the reward won't be normalized online.\n Default is `False`.\n use_env_creator (bool, optional): wheter the `EnvCreator` class should be used. By using `EnvCreator`,\n one can make sure that running statistics will be put in shared memory and accessible for all workers\n when using a `VecNorm` transform. 
Default is `True`.\n custom_env_maker (callable, optional): if your env maker is not part\n of torchrl env wrappers, a custom callable\n can be passed instead. In this case it will override the\n constructor retrieved from `args`.\n custom_env (EnvBase, optional): if an existing environment needs to be\n transformed_in, it can be passed directly to this helper. `custom_env_maker`\n and `custom_env` are exclusive features.\n return_transformed_envs (bool, optional): if True, a transformed_in environment\n is returned.\n action_dim_gsde (int, Optional): if gSDE is used, this can present the action dim to initialize the noise.\n Make sure this is indicated in environment executed in parallel.\n state_dim_gsde: if gSDE is used, this can present the state dim to initialize the noise.\n Make sure this is indicated in environment executed in parallel.\n batch_dims (int, optional): number of dimensions of a batch of data. If a single env is\n used, it should be 0 (default). If multiple envs are being transformed in parallel,\n it should be set to 1 (or the number of dims of the batch).\n \"\"\"\n\n def make_transformed_env(**kwargs) -> TransformedEnv:\n env_name = cfg.env_name\n env_task = cfg.env_task\n env_library = LIBS[cfg.env_library]\n frame_skip = cfg.frame_skip\n from_pixels = cfg.from_pixels\n\n if custom_env is None and custom_env_maker is None:\n if isinstance(cfg.collector_devices, str):\n device = cfg.collector_devices\n elif isinstance(cfg.collector_devices, Sequence):\n device = cfg.collector_devices[0]\n else:\n raise ValueError(\n \"collector_devices must be either a string or a sequence of strings\"\n )\n env_kwargs = {\n \"env_name\": env_name,\n \"device\": device,\n \"frame_skip\": frame_skip,\n \"from_pixels\": from_pixels or len(video_tag),\n \"pixels_only\": from_pixels,\n }\n if env_library is DMControlEnv:\n env_kwargs.update({\"task_name\": env_task})\n env_kwargs.update(kwargs)\n env = env_library(**env_kwargs)\n elif custom_env is None and custom_env_maker is not None:\n env = custom_env_maker(**kwargs)\n elif custom_env_maker is None and custom_env is not None:\n env = custom_env\n else:\n raise RuntimeError(\"cannot provive both custom_env and custom_env_maker\")\n\n if not return_transformed_envs:\n return env\n\n return make_env_transforms(\n env,\n cfg,\n video_tag,\n logger,\n env_name,\n stats,\n norm_obs_only,\n env_library,\n action_dim_gsde,\n state_dim_gsde,\n batch_dims=batch_dims,\n )\n\n if use_env_creator:\n return env_creator(make_transformed_env)\n return make_transformed_env\n\n\ndef parallel_env_constructor(\n cfg: \"DictConfig\", **kwargs # noqa: F821\n) -> Union[ParallelEnv, EnvCreator]:\n \"\"\"Returns a parallel environment from an argparse.Namespace built with the appropriate parser constructor.\n\n Args:\n cfg (DictConfig): config containing user-defined arguments\n kwargs: keyword arguments for the `transformed_env_constructor` method.\n \"\"\"\n batch_transform = cfg.batch_transform\n if cfg.env_per_collector == 1:\n kwargs.update({\"cfg\": cfg, \"use_env_creator\": True})\n make_transformed_env = transformed_env_constructor(**kwargs)\n return make_transformed_env\n kwargs.update({\"cfg\": cfg, \"use_env_creator\": True})\n make_transformed_env = transformed_env_constructor(\n return_transformed_envs=not batch_transform, **kwargs\n )\n parallel_env = ParallelEnv(\n num_workers=cfg.env_per_collector,\n create_env_fn=make_transformed_env,\n create_env_kwargs=None,\n pin_memory=cfg.pin_memory,\n )\n if batch_transform:\n kwargs.update(\n {\n 
\"cfg\": cfg,\n \"use_env_creator\": False,\n \"custom_env\": parallel_env,\n \"batch_dims\": 1,\n }\n )\n env = transformed_env_constructor(**kwargs)()\n return env\n return parallel_env\n\n\[email protected]_mode()\ndef get_stats_random_rollout(\n cfg: \"DictConfig\", # noqa: F821\n proof_environment: EnvBase = None,\n key: Optional[str] = None,\n):\n proof_env_is_none = proof_environment is None\n if proof_env_is_none:\n proof_environment = transformed_env_constructor(\n cfg=cfg, use_env_creator=False\n )()\n\n print(\"computing state stats\")\n if not hasattr(cfg, \"init_env_steps\"):\n raise AttributeError(\"init_env_steps missing from arguments.\")\n\n n = 0\n val_stats = []\n while n < cfg.init_env_steps:\n _td_stats = proof_environment.rollout(max_steps=cfg.init_env_steps)\n n += _td_stats.numel()\n val = _td_stats.get(key).cpu()\n val_stats.append(val)\n del _td_stats, val\n val_stats = torch.cat(val_stats, 0)\n\n if key is None:\n keys = list(proof_environment.observation_spec.keys())\n key = keys.pop()\n if len(keys):\n raise RuntimeError(\n f\"More than one key exists in the observation_specs: {[key] + keys} were found, \"\n \"thus get_stats_random_rollout cannot infer which to compute the stats of.\"\n )\n\n if key == \"next_pixels\":\n m = val_stats.mean()\n s = val_stats.std()\n else:\n m = val_stats.mean(dim=0)\n s = val_stats.std(dim=0)\n m[s == 0] = 0.0\n s[s == 0] = 1.0\n\n print(\n f\"stats computed for {val_stats.numel()} steps. Got: \\n\"\n f\"loc = {m}, \\n\"\n f\"scale = {s}\"\n )\n if not torch.isfinite(m).all():\n raise RuntimeError(\"non-finite values found in mean\")\n if not torch.isfinite(s).all():\n raise RuntimeError(\"non-finite values found in sd\")\n stats = {\"loc\": m, \"scale\": s}\n if proof_env_is_none:\n proof_environment.close()\n if (\n proof_environment.device != torch.device(\"cpu\")\n and torch.cuda.device_count() > 0\n ):\n torch.cuda.empty_cache()\n del proof_environment\n return stats\n\n\n@dataclass\nclass EnvConfig:\n env_library: str = \"gym\"\n # env_library used for the simulated environment. Default=gym\n env_name: str = \"Humanoid-v2\"\n # name of the environment to be created. Default=Humanoid-v2\n env_task: str = \"\"\n # task (if any) for the environment. Default=run\n from_pixels: bool = False\n # whether the environment output should be state vector(s) (default) or the pixels.\n frame_skip: int = 1\n # frame_skip for the environment. Note that this value does NOT impact the buffer size,\n # maximum steps per trajectory, frames per batch or any other factor in the algorithm,\n # e.g. if the total number of frames that has to be computed is 50e6 and the frame skip is 4\n # the actual number of frames retrieved will be 200e6. Default=1.\n reward_scaling: Optional[float] = None\n # scale of the reward.\n reward_loc: float = 0.0\n # location of the reward.\n init_env_steps: int = 1000\n # number of random steps to compute normalizing constants\n vecnorm: bool = False\n # Normalizes the environment observation and reward outputs with the running statistics obtained across processes.\n norm_rewards: bool = False\n # If True, rewards will be normalized on the fly. This may interfere with SAC update rule and should be used cautiously.\n norm_stats: bool = True\n # Deactivates the normalization based on random collection of data.\n noops: int = 0\n # number of random steps to do after reset. Default is 0\n catframes: int = 0\n # Number of frames to concatenate through time. 
Default is 0 (do not use CatFrames).\n center_crop: Any = dataclass_field(default_factory=lambda: [])\n # center crop size.\n grayscale: bool = True\n # Disables grayscale transform.\n max_frames_per_traj: int = 1000\n # Number of steps before a reset of the environment is called (if it has not been flagged as done before).\n batch_transform: bool = False\n # if True, the transforms will be applied to the parallel env, and not to each individual env.\\\n image_size: int = 84\n", "path": "torchrl/trainers/helpers/envs.py" } ]
[ { "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom dataclasses import dataclass\nfrom dataclasses import field as dataclass_field\nfrom typing import Callable, Optional, Union, Any, Sequence\n\nimport torch\n\nfrom torchrl.envs import ParallelEnv\nfrom torchrl.envs.common import EnvBase\nfrom torchrl.envs.env_creator import env_creator, EnvCreator\nfrom torchrl.envs.libs.dm_control import DMControlEnv\nfrom torchrl.envs.libs.gym import GymEnv\nfrom torchrl.envs.transforms import (\n CatFrames,\n CatTensors,\n DoubleToFloat,\n FiniteTensorDictCheck,\n GrayScale,\n NoopResetEnv,\n ObservationNorm,\n Resize,\n RewardScaling,\n ToTensorImage,\n TransformedEnv,\n VecNorm,\n CenterCrop,\n)\nfrom torchrl.envs.transforms.transforms import gSDENoise, FlattenObservation\nfrom torchrl.record.recorder import VideoRecorder\nfrom torchrl.trainers.loggers import Logger\n\n__all__ = [\n \"correct_for_frame_skip\",\n \"transformed_env_constructor\",\n \"parallel_env_constructor\",\n \"get_stats_random_rollout\",\n]\n\nLIBS = {\n \"gym\": GymEnv,\n \"dm_control\": DMControlEnv,\n}\n\n\ndef correct_for_frame_skip(cfg: \"DictConfig\") -> \"DictConfig\": # noqa: F821\n \"\"\"\n Correct the arguments for the input frame_skip, by dividing all the arguments that reflect a count of frames by the\n frame_skip.\n This is aimed at avoiding unknowingly over-sampling from the environment, i.e. targetting a total number of frames\n of 1M but actually collecting frame_skip * 1M frames.\n\n Args:\n cfg (DictConfig): DictConfig containing some frame-counting argument, including:\n \"max_frames_per_traj\", \"total_frames\", \"frames_per_batch\", \"record_frames\", \"annealing_frames\",\n \"init_random_frames\", \"init_env_steps\"\n\n Returns:\n the input DictConfig, modified in-place.\n\n \"\"\"\n # Adapt all frame counts wrt frame_skip\n if cfg.frame_skip != 1:\n fields = [\n \"max_frames_per_traj\",\n \"total_frames\",\n \"frames_per_batch\",\n \"record_frames\",\n \"annealing_frames\",\n \"init_random_frames\",\n \"init_env_steps\",\n \"noops\",\n ]\n for field in fields:\n if hasattr(cfg, field):\n setattr(cfg, field, getattr(cfg, field) // cfg.frame_skip)\n return cfg\n\n\ndef make_env_transforms(\n env,\n cfg,\n video_tag,\n logger,\n env_name,\n stats,\n norm_obs_only,\n env_library,\n action_dim_gsde,\n state_dim_gsde,\n batch_dims=0,\n):\n env = TransformedEnv(env)\n\n from_pixels = cfg.from_pixels\n vecnorm = cfg.vecnorm\n norm_rewards = vecnorm and cfg.norm_rewards\n _norm_obs_only = norm_obs_only or not norm_rewards\n reward_scaling = cfg.reward_scaling\n reward_loc = cfg.reward_loc\n\n if len(video_tag):\n center_crop = cfg.center_crop\n if center_crop:\n center_crop = center_crop[0]\n env.append_transform(\n VideoRecorder(\n logger=logger,\n tag=f\"{video_tag}_{env_name}_video\",\n center_crop=center_crop,\n ),\n )\n\n if cfg.noops:\n env.append_transform(NoopResetEnv(cfg.noops))\n\n if from_pixels:\n if not cfg.catframes:\n raise RuntimeError(\n \"this env builder currently only accepts positive catframes values\"\n \"when pixels are being used.\"\n )\n env.append_transform(ToTensorImage())\n if cfg.center_crop:\n env.append_transform(CenterCrop(*cfg.center_crop))\n env.append_transform(Resize(cfg.image_size, cfg.image_size))\n if cfg.grayscale:\n env.append_transform(GrayScale())\n env.append_transform(FlattenObservation())\n 
env.append_transform(CatFrames(N=cfg.catframes, keys_in=[\"next_pixels\"]))\n if stats is None:\n obs_stats = {\"loc\": 0.0, \"scale\": 1.0}\n else:\n obs_stats = stats\n obs_stats[\"standard_normal\"] = True\n env.append_transform(ObservationNorm(**obs_stats, keys_in=[\"next_pixels\"]))\n if norm_rewards:\n reward_scaling = 1.0\n reward_loc = 0.0\n if norm_obs_only:\n reward_scaling = 1.0\n reward_loc = 0.0\n if reward_scaling is not None:\n env.append_transform(RewardScaling(reward_loc, reward_scaling))\n\n double_to_float_list = []\n double_to_float_inv_list = []\n if env_library is DMControlEnv:\n double_to_float_list += [\n \"reward\",\n ]\n double_to_float_list += [\n \"action\",\n ]\n double_to_float_inv_list += [\"action\"] # DMControl requires double-precision\n if not from_pixels:\n selected_keys = [\n key\n for key in env.observation_spec.keys()\n if (\"pixels\" not in key)\n and (key.replace(\"next_\", \"\") not in env.input_spec.keys())\n ]\n\n # even if there is a single tensor, it'll be renamed in \"next_observation_vector\"\n out_key = \"next_observation_vector\"\n env.append_transform(CatTensors(keys_in=selected_keys, out_key=out_key))\n\n if not vecnorm:\n if stats is None:\n _stats = {\"loc\": 0.0, \"scale\": 1.0}\n else:\n _stats = stats\n env.append_transform(\n ObservationNorm(**_stats, keys_in=[out_key], standard_normal=True)\n )\n else:\n env.append_transform(\n VecNorm(\n keys_in=[out_key, \"reward\"] if not _norm_obs_only else [out_key],\n decay=0.9999,\n )\n )\n\n double_to_float_list.append(out_key)\n env.append_transform(\n DoubleToFloat(\n keys_in=double_to_float_list, keys_inv_in=double_to_float_inv_list\n )\n )\n\n if hasattr(cfg, \"catframes\") and cfg.catframes:\n env.append_transform(\n CatFrames(N=cfg.catframes, keys_in=[out_key], cat_dim=-1)\n )\n\n else:\n env.append_transform(\n DoubleToFloat(\n keys_in=double_to_float_list, keys_inv_in=double_to_float_inv_list\n )\n )\n\n if hasattr(cfg, \"gSDE\") and cfg.gSDE:\n env.append_transform(\n gSDENoise(action_dim=action_dim_gsde, state_dim=state_dim_gsde)\n )\n\n env.append_transform(FiniteTensorDictCheck())\n return env\n\n\ndef transformed_env_constructor(\n cfg: \"DictConfig\", # noqa: F821\n video_tag: str = \"\",\n logger: Optional[Logger] = None,\n stats: Optional[dict] = None,\n norm_obs_only: bool = False,\n use_env_creator: bool = False,\n custom_env_maker: Optional[Callable] = None,\n custom_env: Optional[EnvBase] = None,\n return_transformed_envs: bool = True,\n action_dim_gsde: Optional[int] = None,\n state_dim_gsde: Optional[int] = None,\n batch_dims: Optional[int] = 0,\n) -> Union[Callable, EnvCreator]:\n \"\"\"\n Returns an environment creator from an argparse.Namespace built with the appropriate parser constructor.\n\n Args:\n cfg (DictConfig): a DictConfig containing the arguments of the script.\n video_tag (str, optional): video tag to be passed to the Logger object\n logger (Logger, optional): logger associated with the script\n stats (dict, optional): a dictionary containing the `loc` and `scale` for the `ObservationNorm` transform\n norm_obs_only (bool, optional): If `True` and `VecNorm` is used, the reward won't be normalized online.\n Default is `False`.\n use_env_creator (bool, optional): wheter the `EnvCreator` class should be used. By using `EnvCreator`,\n one can make sure that running statistics will be put in shared memory and accessible for all workers\n when using a `VecNorm` transform. 
Default is `True`.\n custom_env_maker (callable, optional): if your env maker is not part\n of torchrl env wrappers, a custom callable\n can be passed instead. In this case it will override the\n constructor retrieved from `args`.\n custom_env (EnvBase, optional): if an existing environment needs to be\n transformed_in, it can be passed directly to this helper. `custom_env_maker`\n and `custom_env` are exclusive features.\n return_transformed_envs (bool, optional): if True, a transformed_in environment\n is returned.\n action_dim_gsde (int, Optional): if gSDE is used, this can present the action dim to initialize the noise.\n Make sure this is indicated in environment executed in parallel.\n state_dim_gsde: if gSDE is used, this can present the state dim to initialize the noise.\n Make sure this is indicated in environment executed in parallel.\n batch_dims (int, optional): number of dimensions of a batch of data. If a single env is\n used, it should be 0 (default). If multiple envs are being transformed in parallel,\n it should be set to 1 (or the number of dims of the batch).\n \"\"\"\n\n def make_transformed_env(**kwargs) -> TransformedEnv:\n env_name = cfg.env_name\n env_task = cfg.env_task\n env_library = LIBS[cfg.env_library]\n frame_skip = cfg.frame_skip\n from_pixels = cfg.from_pixels\n\n if custom_env is None and custom_env_maker is None:\n if isinstance(cfg.collector_devices, str):\n device = cfg.collector_devices\n elif isinstance(cfg.collector_devices, Sequence):\n device = cfg.collector_devices[0]\n else:\n raise ValueError(\n \"collector_devices must be either a string or a sequence of strings\"\n )\n env_kwargs = {\n \"env_name\": env_name,\n \"device\": device,\n \"frame_skip\": frame_skip,\n \"from_pixels\": from_pixels or len(video_tag),\n \"pixels_only\": from_pixels,\n }\n if env_library is DMControlEnv:\n env_kwargs.update({\"task_name\": env_task})\n env_kwargs.update(kwargs)\n env = env_library(**env_kwargs)\n elif custom_env is None and custom_env_maker is not None:\n env = custom_env_maker(**kwargs)\n elif custom_env_maker is None and custom_env is not None:\n env = custom_env\n else:\n raise RuntimeError(\"cannot provive both custom_env and custom_env_maker\")\n\n if not return_transformed_envs:\n return env\n\n return make_env_transforms(\n env,\n cfg,\n video_tag,\n logger,\n env_name,\n stats,\n norm_obs_only,\n env_library,\n action_dim_gsde,\n state_dim_gsde,\n batch_dims=batch_dims,\n )\n\n if use_env_creator:\n return env_creator(make_transformed_env)\n return make_transformed_env\n\n\ndef parallel_env_constructor(\n cfg: \"DictConfig\", **kwargs # noqa: F821\n) -> Union[ParallelEnv, EnvCreator]:\n \"\"\"Returns a parallel environment from an argparse.Namespace built with the appropriate parser constructor.\n\n Args:\n cfg (DictConfig): config containing user-defined arguments\n kwargs: keyword arguments for the `transformed_env_constructor` method.\n \"\"\"\n batch_transform = cfg.batch_transform\n if cfg.env_per_collector == 1:\n kwargs.update({\"cfg\": cfg, \"use_env_creator\": True})\n make_transformed_env = transformed_env_constructor(**kwargs)\n return make_transformed_env\n kwargs.update({\"cfg\": cfg, \"use_env_creator\": True})\n make_transformed_env = transformed_env_constructor(\n return_transformed_envs=not batch_transform, **kwargs\n )\n parallel_env = ParallelEnv(\n num_workers=cfg.env_per_collector,\n create_env_fn=make_transformed_env,\n create_env_kwargs=None,\n pin_memory=cfg.pin_memory,\n )\n if batch_transform:\n kwargs.update(\n {\n 
\"cfg\": cfg,\n \"use_env_creator\": False,\n \"custom_env\": parallel_env,\n \"batch_dims\": 1,\n }\n )\n env = transformed_env_constructor(**kwargs)()\n return env\n return parallel_env\n\n\[email protected]_grad()\ndef get_stats_random_rollout(\n cfg: \"DictConfig\", # noqa: F821\n proof_environment: EnvBase = None,\n key: Optional[str] = None,\n):\n proof_env_is_none = proof_environment is None\n if proof_env_is_none:\n proof_environment = transformed_env_constructor(\n cfg=cfg, use_env_creator=False\n )()\n\n print(\"computing state stats\")\n if not hasattr(cfg, \"init_env_steps\"):\n raise AttributeError(\"init_env_steps missing from arguments.\")\n\n n = 0\n val_stats = []\n while n < cfg.init_env_steps:\n _td_stats = proof_environment.rollout(max_steps=cfg.init_env_steps)\n n += _td_stats.numel()\n val = _td_stats.get(key).cpu()\n val_stats.append(val)\n del _td_stats, val\n val_stats = torch.cat(val_stats, 0)\n\n if key is None:\n keys = list(proof_environment.observation_spec.keys())\n key = keys.pop()\n if len(keys):\n raise RuntimeError(\n f\"More than one key exists in the observation_specs: {[key] + keys} were found, \"\n \"thus get_stats_random_rollout cannot infer which to compute the stats of.\"\n )\n\n if key == \"next_pixels\":\n m = val_stats.mean()\n s = val_stats.std()\n else:\n m = val_stats.mean(dim=0)\n s = val_stats.std(dim=0)\n m[s == 0] = 0.0\n s[s == 0] = 1.0\n\n print(\n f\"stats computed for {val_stats.numel()} steps. Got: \\n\"\n f\"loc = {m}, \\n\"\n f\"scale = {s}\"\n )\n if not torch.isfinite(m).all():\n raise RuntimeError(\"non-finite values found in mean\")\n if not torch.isfinite(s).all():\n raise RuntimeError(\"non-finite values found in sd\")\n stats = {\"loc\": m, \"scale\": s}\n if proof_env_is_none:\n proof_environment.close()\n if (\n proof_environment.device != torch.device(\"cpu\")\n and torch.cuda.device_count() > 0\n ):\n torch.cuda.empty_cache()\n del proof_environment\n return stats\n\n\n@dataclass\nclass EnvConfig:\n env_library: str = \"gym\"\n # env_library used for the simulated environment. Default=gym\n env_name: str = \"Humanoid-v2\"\n # name of the environment to be created. Default=Humanoid-v2\n env_task: str = \"\"\n # task (if any) for the environment. Default=run\n from_pixels: bool = False\n # whether the environment output should be state vector(s) (default) or the pixels.\n frame_skip: int = 1\n # frame_skip for the environment. Note that this value does NOT impact the buffer size,\n # maximum steps per trajectory, frames per batch or any other factor in the algorithm,\n # e.g. if the total number of frames that has to be computed is 50e6 and the frame skip is 4\n # the actual number of frames retrieved will be 200e6. Default=1.\n reward_scaling: Optional[float] = None\n # scale of the reward.\n reward_loc: float = 0.0\n # location of the reward.\n init_env_steps: int = 1000\n # number of random steps to compute normalizing constants\n vecnorm: bool = False\n # Normalizes the environment observation and reward outputs with the running statistics obtained across processes.\n norm_rewards: bool = False\n # If True, rewards will be normalized on the fly. This may interfere with SAC update rule and should be used cautiously.\n norm_stats: bool = True\n # Deactivates the normalization based on random collection of data.\n noops: int = 0\n # number of random steps to do after reset. Default is 0\n catframes: int = 0\n # Number of frames to concatenate through time. 
Default is 0 (do not use CatFrames).\n center_crop: Any = dataclass_field(default_factory=lambda: [])\n # center crop size.\n grayscale: bool = True\n # Disables grayscale transform.\n max_frames_per_traj: int = 1000\n # Number of steps before a reset of the environment is called (if it has not been flagged as done before).\n batch_transform: bool = False\n # if True, the transforms will be applied to the parallel env, and not to each individual env.\\\n image_size: int = 84\n", "path": "torchrl/trainers/helpers/envs.py" } ]
diff --git a/torchrl/trainers/helpers/envs.py b/torchrl/trainers/helpers/envs.py index b01823e32ef..f5349a4766a 100644 --- a/torchrl/trainers/helpers/envs.py +++ b/torchrl/trainers/helpers/envs.py @@ -351,7 +351,7 @@ def parallel_env_constructor( return parallel_env [email protected]_mode() [email protected]_grad() def get_stats_random_rollout( cfg: "DictConfig", # noqa: F821 proof_environment: EnvBase = None,
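The PyTorch behaviour behind the traceback, reproduced in isolation (a sketch independent of torchrl; the `Norm` module below is hypothetical): a buffer created under `torch.inference_mode()` is an inference tensor, and the in-place copy performed by `load_state_dict` fails on it outside inference mode, whereas a buffer created under `torch.no_grad()` loads fine — which is why the patch swaps the decorator.

```python
# Sketch: why the patch replaces inference_mode() with no_grad() when computing stats.
import torch
from torch import nn


class Norm(nn.Module):
    """Hypothetical stand-in for a transform that stores stats as a buffer."""

    def __init__(self, loc: torch.Tensor) -> None:
        super().__init__()
        self.register_buffer("loc", loc)


with torch.no_grad():
    ok_stats = torch.zeros(17)          # plain tensor
with torch.inference_mode():
    bad_stats = torch.zeros(17)         # inference tensor

Norm(ok_stats).load_state_dict({"loc": torch.ones(17)})       # works

try:
    Norm(bad_stats).load_state_dict({"loc": torch.ones(17)})  # copy_ into an inference tensor
except RuntimeError as exc:
    print(exc)  # ... Inplace update to inference tensor outside InferenceMode is not allowed
```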
crytic__slither-1229
slither fails with no detectors results and sarif output ### Describe the issue: Running for example ``` slither --exclude-informational tests/test_node_modules/node_modules/@openzeppelin/contracts/utils/math/Math.sol --sarif slither.sarif ``` Leads to a crash ``` tests/test_node_modules/node_modules/@openzeppelin/contracts/utils/math/Math.sol analyzed (1 contracts with 61 detectors), 0 result(s) found Traceback (most recent call last): File "/home/lulu/r/crytic/slither/venv/bin/slither", line 33, in <module> sys.exit(load_entry_point('slither-analyzer', 'console_scripts', 'slither')()) File "/home/lulu/r/crytic/slither/slither/__main__.py", line 643, in main main_impl(all_detector_classes=detectors, all_printer_classes=printers) File "/home/lulu/r/crytic/slither/slither/__main__.py", line 831, in main_impl output_to_sarif( File "/home/lulu/r/crytic/slither/slither/utils/output.py", line 163, in output_to_sarif for detector in results["detectors"]: KeyError: 'detectors' ``` The `"detectors"` key is missing because it's not added if empty: https://github.com/crytic/slither/blob/168e96298fb8f8a588c110aa75cd38b3a7662ed9/slither/__main__.py#L769-L770 ### Code example to reproduce the issue: ``` slither --exclude-informational tests/test_node_modules/node_modules/@openzeppelin/contracts/utils/math/Math.sol --sarif slither.sarif ``` ### Version: `0.8.3` or fba37f2c0c8196079719432d6324e42a1a974399 ### Relevant log output: _No response_
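A defensive variant of the failing loop (a sketch only, not necessarily how the actual PR resolves it): treating a missing `"detectors"` key as an empty list lets a zero-finding run still emit a SARIF file. The `output_to_sarif` source in question appears in the record below.

```python
# Sketch (not necessarily the actual patch): tolerate a result set with no findings.
results = {}  # what gets passed when zero detector results were recorded

# Failing pattern from output_to_sarif (see the file content below):
#     for detector in results["detectors"]:        # KeyError: 'detectors'

# Defensive alternative:
for detector in results.get("detectors", []):
    ...  # build the SARIF "results" entries as before

print("zero findings, but the SARIF skeleton can still be written")
```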
[ { "content": "import hashlib\nimport os\nimport json\nimport logging\nimport zipfile\nfrom collections import OrderedDict\nfrom typing import Optional, Dict, List, Union, Any, TYPE_CHECKING\nfrom zipfile import ZipFile\nfrom pkg_resources import require\n\nfrom slither.core.cfg.node import Node\nfrom slither.core.declarations import Contract, Function, Enum, Event, Structure, Pragma\nfrom slither.core.source_mapping.source_mapping import SourceMapping\nfrom slither.core.variables.variable import Variable\nfrom slither.exceptions import SlitherError\nfrom slither.utils.colors import yellow\nfrom slither.utils.myprettytable import MyPrettyTable\n\nif TYPE_CHECKING:\n from slither.core.compilation_unit import SlitherCompilationUnit\n from slither.detectors.abstract_detector import AbstractDetector\n\nlogger = logging.getLogger(\"Slither\")\n\n\n###################################################################################\n###################################################################################\n# region Output\n###################################################################################\n###################################################################################\n\n\ndef output_to_json(filename: Optional[str], error, results: Dict) -> None:\n \"\"\"\n\n :param filename: Filename where the json will be written. If None or \"-\", write to stdout\n :param error: Error to report\n :param results: Results to report\n :param logger: Logger where to log potential info\n :return:\n \"\"\"\n # Create our encapsulated JSON result.\n json_result = {\"success\": error is None, \"error\": error, \"results\": results}\n\n if filename == \"-\":\n filename = None\n\n # Determine if we should output to stdout\n if filename is None:\n # Write json to console\n print(json.dumps(json_result))\n else:\n # Write json to file\n if os.path.isfile(filename):\n logger.info(yellow(f\"{filename} exists already, the overwrite is prevented\"))\n else:\n with open(filename, \"w\", encoding=\"utf8\") as f:\n json.dump(json_result, f, indent=2)\n\n\ndef _output_result_to_sarif(\n detector: Dict, detectors_classes: List[\"AbstractDetector\"], sarif: Dict\n) -> None:\n confidence = \"very-high\"\n if detector[\"confidence\"] == \"Medium\":\n confidence = \"high\"\n elif detector[\"confidence\"] == \"Low\":\n confidence = \"medium\"\n elif detector[\"confidence\"] == \"Informational\":\n confidence = \"low\"\n\n risk = \"0.0\"\n if detector[\"impact\"] == \"High\":\n risk = \"8.0\"\n elif detector[\"impact\"] == \"Medium\":\n risk = \"4.0\"\n elif detector[\"impact\"] == \"Low\":\n risk = \"3.0\"\n\n detector_class = next((d for d in detectors_classes if d.ARGUMENT == detector[\"check\"]))\n check_id = (\n str(detector_class.IMPACT.value)\n + \"-\"\n + str(detector_class.CONFIDENCE.value)\n + \"-\"\n + detector[\"check\"]\n )\n\n rule = {\n \"id\": check_id,\n \"name\": detector[\"check\"],\n \"properties\": {\"precision\": confidence, \"security-severity\": risk},\n \"shortDescription\": {\"text\": detector_class.WIKI_TITLE},\n \"help\": {\"text\": detector_class.WIKI_RECOMMENDATION},\n }\n # Add the rule if does not exist yet\n if len([x for x in sarif[\"runs\"][0][\"tool\"][\"driver\"][\"rules\"] if x[\"id\"] == check_id]) == 0:\n sarif[\"runs\"][0][\"tool\"][\"driver\"][\"rules\"].append(rule)\n\n if not detector[\"elements\"]:\n logger.info(yellow(\"Cannot generate Github security alert for finding without location\"))\n logger.info(yellow(detector[\"description\"]))\n 
logger.info(yellow(\"This will be supported in a future Slither release\"))\n return\n\n # From 3.19.10 (http://docs.oasis-open.org/sarif/sarif/v2.0/csprd01/sarif-v2.0-csprd01.html)\n # The locations array SHALL NOT contain more than one element unless the condition indicated by the result,\n # if any, can only be corrected by making a change at every location specified in the array.\n finding = detector[\"elements\"][0]\n path = finding[\"source_mapping\"][\"filename_relative\"]\n start_line = finding[\"source_mapping\"][\"lines\"][0]\n end_line = finding[\"source_mapping\"][\"lines\"][-1]\n\n sarif[\"runs\"][0][\"results\"].append(\n {\n \"ruleId\": check_id,\n \"message\": {\"text\": detector[\"description\"], \"markdown\": detector[\"markdown\"]},\n \"level\": \"warning\",\n \"locations\": [\n {\n \"physicalLocation\": {\n \"artifactLocation\": {\"uri\": path},\n \"region\": {\"startLine\": start_line, \"endLine\": end_line},\n }\n }\n ],\n \"partialFingerprints\": {\"id\": detector[\"id\"]},\n }\n )\n\n\ndef output_to_sarif(\n filename: Optional[str], results: Dict, detectors_classes: List[\"AbstractDetector\"]\n) -> None:\n \"\"\"\n\n :param filename:\n :type filename:\n :param results:\n :type results:\n :return:\n :rtype:\n \"\"\"\n\n sarif: Dict[str, Any] = {\n \"$schema\": \"https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json\",\n \"version\": \"2.1.0\",\n \"runs\": [\n {\n \"tool\": {\n \"driver\": {\n \"name\": \"Slither\",\n \"informationUri\": \"https://github.com/crytic/slither\",\n \"version\": require(\"slither-analyzer\")[0].version,\n \"rules\": [],\n }\n },\n \"results\": [],\n }\n ],\n }\n\n for detector in results[\"detectors\"]:\n _output_result_to_sarif(detector, detectors_classes, sarif)\n\n if filename == \"-\":\n filename = None\n\n # Determine if we should output to stdout\n if filename is None:\n # Write json to console\n print(json.dumps(sarif))\n else:\n # Write json to file\n if os.path.isfile(filename):\n logger.info(yellow(f\"{filename} exists already, the overwrite is prevented\"))\n else:\n with open(filename, \"w\", encoding=\"utf8\") as f:\n json.dump(sarif, f, indent=2)\n\n\n# https://docs.python.org/3/library/zipfile.html#zipfile-objects\nZIP_TYPES_ACCEPTED = {\n \"lzma\": zipfile.ZIP_LZMA,\n \"stored\": zipfile.ZIP_STORED,\n \"deflated\": zipfile.ZIP_DEFLATED,\n \"bzip2\": zipfile.ZIP_BZIP2,\n}\n\n\ndef output_to_zip(filename: str, error: Optional[str], results: Dict, zip_type: str = \"lzma\"):\n \"\"\"\n Output the results to a zip\n The file in the zip is named slither_results.json\n Note: the json file will not have indentation, as a result the resulting json file will be smaller\n :param zip_type:\n :param filename:\n :param error:\n :param results:\n :return:\n \"\"\"\n json_result = {\"success\": error is None, \"error\": error, \"results\": results}\n if os.path.isfile(filename):\n logger.info(yellow(f\"{filename} exists already, the overwrite is prevented\"))\n else:\n with ZipFile(\n filename,\n \"w\",\n compression=ZIP_TYPES_ACCEPTED.get(zip_type, zipfile.ZIP_LZMA),\n ) as file_desc:\n file_desc.writestr(\"slither_results.json\", json.dumps(json_result).encode(\"utf8\"))\n\n\n# endregion\n###################################################################################\n###################################################################################\n# region Json 
generation\n###################################################################################\n###################################################################################\n\n\ndef _convert_to_description(d):\n if isinstance(d, str):\n return d\n\n if not isinstance(d, SourceMapping):\n raise SlitherError(f\"{d} does not inherit from SourceMapping, conversion impossible\")\n\n if isinstance(d, Node):\n if d.expression:\n return f\"{d.expression} ({d.source_mapping_str})\"\n return f\"{str(d)} ({d.source_mapping_str})\"\n\n if hasattr(d, \"canonical_name\"):\n return f\"{d.canonical_name} ({d.source_mapping_str})\"\n\n if hasattr(d, \"name\"):\n return f\"{d.name} ({d.source_mapping_str})\"\n\n raise SlitherError(f\"{type(d)} cannot be converted (no name, or canonical_name\")\n\n\ndef _convert_to_markdown(d, markdown_root):\n if isinstance(d, str):\n return d\n\n if not isinstance(d, SourceMapping):\n raise SlitherError(f\"{d} does not inherit from SourceMapping, conversion impossible\")\n\n if isinstance(d, Node):\n if d.expression:\n return f\"[{d.expression}]({d.source_mapping_to_markdown(markdown_root)})\"\n return f\"[{str(d)}]({d.source_mapping_to_markdown(markdown_root)})\"\n\n if hasattr(d, \"canonical_name\"):\n return f\"[{d.canonical_name}]({d.source_mapping_to_markdown(markdown_root)})\"\n\n if hasattr(d, \"name\"):\n return f\"[{d.name}]({d.source_mapping_to_markdown(markdown_root)})\"\n\n raise SlitherError(f\"{type(d)} cannot be converted (no name, or canonical_name\")\n\n\ndef _convert_to_id(d):\n \"\"\"\n Id keeps the source mapping of the node, otherwise we risk to consider two different node as the same\n :param d:\n :return:\n \"\"\"\n if isinstance(d, str):\n return d\n\n if not isinstance(d, SourceMapping):\n raise SlitherError(f\"{d} does not inherit from SourceMapping, conversion impossible\")\n\n if isinstance(d, Node):\n if d.expression:\n return f\"{d.expression} ({d.source_mapping_str})\"\n return f\"{str(d)} ({d.source_mapping_str})\"\n\n if isinstance(d, Pragma):\n return f\"{d} ({d.source_mapping_str})\"\n\n if hasattr(d, \"canonical_name\"):\n return f\"{d.canonical_name}\"\n\n if hasattr(d, \"name\"):\n return f\"{d.name}\"\n\n raise SlitherError(f\"{type(d)} cannot be converted (no name, or canonical_name\")\n\n\n# endregion\n###################################################################################\n###################################################################################\n# region Internal functions\n###################################################################################\n###################################################################################\n\n\ndef _create_base_element(\n custom_type, name, source_mapping, type_specific_fields=None, additional_fields=None\n):\n if additional_fields is None:\n additional_fields = {}\n if type_specific_fields is None:\n type_specific_fields = {}\n element = {\"type\": custom_type, \"name\": name, \"source_mapping\": source_mapping}\n if type_specific_fields:\n element[\"type_specific_fields\"] = type_specific_fields\n if additional_fields:\n element[\"additional_fields\"] = additional_fields\n return element\n\n\ndef _create_parent_element(element):\n # pylint: disable=import-outside-toplevel\n from slither.core.children.child_contract import ChildContract\n from slither.core.children.child_function import ChildFunction\n from slither.core.children.child_inheritance import ChildInheritance\n\n if isinstance(element, ChildInheritance):\n if element.contract_declarer:\n 
contract = Output(\"\")\n contract.add_contract(element.contract_declarer)\n return contract.data[\"elements\"][0]\n elif isinstance(element, ChildContract):\n if element.contract:\n contract = Output(\"\")\n contract.add_contract(element.contract)\n return contract.data[\"elements\"][0]\n elif isinstance(element, ChildFunction):\n if element.function:\n function = Output(\"\")\n function.add_function(element.function)\n return function.data[\"elements\"][0]\n return None\n\n\nSupportedOutput = Union[Variable, Contract, Function, Enum, Event, Structure, Pragma, Node]\nAllSupportedOutput = Union[str, SupportedOutput]\n\n\nclass Output:\n def __init__(\n self,\n info_: Union[str, List[Union[str, SupportedOutput]]],\n additional_fields: Optional[Dict] = None,\n markdown_root=\"\",\n standard_format=True,\n ):\n if additional_fields is None:\n additional_fields = {}\n\n # Allow info to be a string to simplify the API\n info: List[Union[str, SupportedOutput]]\n if isinstance(info_, str):\n info = [info_]\n else:\n info = info_\n\n self._data: Dict[str, Any] = OrderedDict()\n self._data[\"elements\"] = []\n self._data[\"description\"] = \"\".join(_convert_to_description(d) for d in info)\n self._data[\"markdown\"] = \"\".join(_convert_to_markdown(d, markdown_root) for d in info)\n self._data[\"first_markdown_element\"] = \"\"\n self._markdown_root = markdown_root\n\n id_txt = \"\".join(_convert_to_id(d) for d in info)\n self._data[\"id\"] = hashlib.sha3_256(id_txt.encode(\"utf-8\")).hexdigest()\n\n if standard_format:\n to_add = [i for i in info if not isinstance(i, str)]\n\n for add in to_add:\n self.add(add)\n\n if additional_fields:\n self._data[\"additional_fields\"] = additional_fields\n\n def add(self, add: SupportedOutput, additional_fields: Optional[Dict] = None):\n if not self._data[\"first_markdown_element\"]:\n self._data[\"first_markdown_element\"] = add.source_mapping_to_markdown(\n self._markdown_root\n )\n if isinstance(add, Variable):\n self.add_variable(add, additional_fields=additional_fields)\n elif isinstance(add, Contract):\n self.add_contract(add, additional_fields=additional_fields)\n elif isinstance(add, Function):\n self.add_function(add, additional_fields=additional_fields)\n elif isinstance(add, Enum):\n self.add_enum(add, additional_fields=additional_fields)\n elif isinstance(add, Event):\n self.add_event(add, additional_fields=additional_fields)\n elif isinstance(add, Structure):\n self.add_struct(add, additional_fields=additional_fields)\n elif isinstance(add, Pragma):\n self.add_pragma(add, additional_fields=additional_fields)\n elif isinstance(add, Node):\n self.add_node(add, additional_fields=additional_fields)\n else:\n raise SlitherError(f\"Impossible to add {type(add)} to the json\")\n\n @property\n def data(self) -> Dict:\n return self._data\n\n @property\n def elements(self) -> List[Dict]:\n return self._data[\"elements\"]\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Variables\n ###################################################################################\n ###################################################################################\n\n def add_variable(self, variable: Variable, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"parent\": _create_parent_element(variable)}\n element = _create_base_element(\n 
\"variable\",\n variable.name,\n variable.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n def add_variables(self, variables: List[Variable]):\n for variable in sorted(variables, key=lambda x: x.name):\n self.add_variable(variable)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Contract\n ###################################################################################\n ###################################################################################\n\n def add_contract(self, contract: Contract, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n element = _create_base_element(\n \"contract\", contract.name, contract.source_mapping, {}, additional_fields\n )\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Functions\n ###################################################################################\n ###################################################################################\n\n def add_function(self, function: Function, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\n \"parent\": _create_parent_element(function),\n \"signature\": function.full_name,\n }\n element = _create_base_element(\n \"function\",\n function.name,\n function.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n def add_functions(self, functions: List[Function], additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n for function in sorted(functions, key=lambda x: x.name):\n self.add_function(function, additional_fields)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Enum\n ###################################################################################\n ###################################################################################\n\n def add_enum(self, enum: Enum, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"parent\": _create_parent_element(enum)}\n element = _create_base_element(\n \"enum\",\n enum.name,\n enum.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Structures\n ###################################################################################\n ###################################################################################\n\n def add_struct(self, struct: Structure, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"parent\": _create_parent_element(struct)}\n element = _create_base_element(\n \"struct\",\n struct.name,\n struct.source_mapping,\n type_specific_fields,\n 
additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Events\n ###################################################################################\n ###################################################################################\n\n def add_event(self, event: Event, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\n \"parent\": _create_parent_element(event),\n \"signature\": event.full_name,\n }\n element = _create_base_element(\n \"event\",\n event.name,\n event.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Nodes\n ###################################################################################\n ###################################################################################\n\n def add_node(self, node: Node, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\n \"parent\": _create_parent_element(node),\n }\n node_name = str(node.expression) if node.expression else \"\"\n element = _create_base_element(\n \"node\",\n node_name,\n node.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n def add_nodes(self, nodes: List[Node]):\n for node in sorted(nodes, key=lambda x: x.node_id):\n self.add_node(node)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Pragma\n ###################################################################################\n ###################################################################################\n\n def add_pragma(self, pragma: Pragma, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"directive\": pragma.directive}\n element = _create_base_element(\n \"pragma\",\n pragma.version,\n pragma.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region File\n ###################################################################################\n ###################################################################################\n\n def add_file(self, filename: str, content: str, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"filename\": filename, \"content\": content}\n element = _create_base_element(\"file\", type_specific_fields, additional_fields)\n\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Pretty Table\n 
###################################################################################\n ###################################################################################\n\n def add_pretty_table(\n self,\n content: MyPrettyTable,\n name: str,\n additional_fields: Optional[Dict] = None,\n ):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"content\": content.to_json(), \"name\": name}\n element = _create_base_element(\"pretty_table\", type_specific_fields, additional_fields)\n\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Others\n ###################################################################################\n ###################################################################################\n\n def add_other(\n self,\n name: str,\n source_mapping,\n compilation_unit: \"SlitherCompilationUnit\",\n additional_fields: Optional[Dict] = None,\n ):\n # If this a tuple with (filename, start, end), convert it to a source mapping.\n if additional_fields is None:\n additional_fields = {}\n if isinstance(source_mapping, tuple):\n # Parse the source id\n (filename, start, end) = source_mapping\n source_id = next(\n (\n source_unit_id\n for (\n source_unit_id,\n source_unit_filename,\n ) in compilation_unit.source_units.items()\n if source_unit_filename == filename\n ),\n -1,\n )\n\n # Convert to a source mapping string\n source_mapping = f\"{start}:{end}:{source_id}\"\n\n # If this is a source mapping string, parse it.\n if isinstance(source_mapping, str):\n source_mapping_str = source_mapping\n source_mapping = SourceMapping()\n source_mapping.set_offset(source_mapping_str, compilation_unit)\n\n # If this is a source mapping object, get the underlying source mapping dictionary\n if isinstance(source_mapping, SourceMapping):\n source_mapping = source_mapping.source_mapping\n\n # Create the underlying element and add it to our resulting json\n element = _create_base_element(\"other\", name, source_mapping, {}, additional_fields)\n self._data[\"elements\"].append(element)\n", "path": "slither/utils/output.py" } ]
[ { "content": "import hashlib\nimport os\nimport json\nimport logging\nimport zipfile\nfrom collections import OrderedDict\nfrom typing import Optional, Dict, List, Union, Any, TYPE_CHECKING\nfrom zipfile import ZipFile\nfrom pkg_resources import require\n\nfrom slither.core.cfg.node import Node\nfrom slither.core.declarations import Contract, Function, Enum, Event, Structure, Pragma\nfrom slither.core.source_mapping.source_mapping import SourceMapping\nfrom slither.core.variables.variable import Variable\nfrom slither.exceptions import SlitherError\nfrom slither.utils.colors import yellow\nfrom slither.utils.myprettytable import MyPrettyTable\n\nif TYPE_CHECKING:\n from slither.core.compilation_unit import SlitherCompilationUnit\n from slither.detectors.abstract_detector import AbstractDetector\n\nlogger = logging.getLogger(\"Slither\")\n\n\n###################################################################################\n###################################################################################\n# region Output\n###################################################################################\n###################################################################################\n\n\ndef output_to_json(filename: Optional[str], error, results: Dict) -> None:\n \"\"\"\n\n :param filename: Filename where the json will be written. If None or \"-\", write to stdout\n :param error: Error to report\n :param results: Results to report\n :param logger: Logger where to log potential info\n :return:\n \"\"\"\n # Create our encapsulated JSON result.\n json_result = {\"success\": error is None, \"error\": error, \"results\": results}\n\n if filename == \"-\":\n filename = None\n\n # Determine if we should output to stdout\n if filename is None:\n # Write json to console\n print(json.dumps(json_result))\n else:\n # Write json to file\n if os.path.isfile(filename):\n logger.info(yellow(f\"{filename} exists already, the overwrite is prevented\"))\n else:\n with open(filename, \"w\", encoding=\"utf8\") as f:\n json.dump(json_result, f, indent=2)\n\n\ndef _output_result_to_sarif(\n detector: Dict, detectors_classes: List[\"AbstractDetector\"], sarif: Dict\n) -> None:\n confidence = \"very-high\"\n if detector[\"confidence\"] == \"Medium\":\n confidence = \"high\"\n elif detector[\"confidence\"] == \"Low\":\n confidence = \"medium\"\n elif detector[\"confidence\"] == \"Informational\":\n confidence = \"low\"\n\n risk = \"0.0\"\n if detector[\"impact\"] == \"High\":\n risk = \"8.0\"\n elif detector[\"impact\"] == \"Medium\":\n risk = \"4.0\"\n elif detector[\"impact\"] == \"Low\":\n risk = \"3.0\"\n\n detector_class = next((d for d in detectors_classes if d.ARGUMENT == detector[\"check\"]))\n check_id = (\n str(detector_class.IMPACT.value)\n + \"-\"\n + str(detector_class.CONFIDENCE.value)\n + \"-\"\n + detector[\"check\"]\n )\n\n rule = {\n \"id\": check_id,\n \"name\": detector[\"check\"],\n \"properties\": {\"precision\": confidence, \"security-severity\": risk},\n \"shortDescription\": {\"text\": detector_class.WIKI_TITLE},\n \"help\": {\"text\": detector_class.WIKI_RECOMMENDATION},\n }\n # Add the rule if does not exist yet\n if len([x for x in sarif[\"runs\"][0][\"tool\"][\"driver\"][\"rules\"] if x[\"id\"] == check_id]) == 0:\n sarif[\"runs\"][0][\"tool\"][\"driver\"][\"rules\"].append(rule)\n\n if not detector[\"elements\"]:\n logger.info(yellow(\"Cannot generate Github security alert for finding without location\"))\n logger.info(yellow(detector[\"description\"]))\n 
logger.info(yellow(\"This will be supported in a future Slither release\"))\n return\n\n # From 3.19.10 (http://docs.oasis-open.org/sarif/sarif/v2.0/csprd01/sarif-v2.0-csprd01.html)\n # The locations array SHALL NOT contain more than one element unless the condition indicated by the result,\n # if any, can only be corrected by making a change at every location specified in the array.\n finding = detector[\"elements\"][0]\n path = finding[\"source_mapping\"][\"filename_relative\"]\n start_line = finding[\"source_mapping\"][\"lines\"][0]\n end_line = finding[\"source_mapping\"][\"lines\"][-1]\n\n sarif[\"runs\"][0][\"results\"].append(\n {\n \"ruleId\": check_id,\n \"message\": {\"text\": detector[\"description\"], \"markdown\": detector[\"markdown\"]},\n \"level\": \"warning\",\n \"locations\": [\n {\n \"physicalLocation\": {\n \"artifactLocation\": {\"uri\": path},\n \"region\": {\"startLine\": start_line, \"endLine\": end_line},\n }\n }\n ],\n \"partialFingerprints\": {\"id\": detector[\"id\"]},\n }\n )\n\n\ndef output_to_sarif(\n filename: Optional[str], results: Dict, detectors_classes: List[\"AbstractDetector\"]\n) -> None:\n \"\"\"\n\n :param filename:\n :type filename:\n :param results:\n :type results:\n :return:\n :rtype:\n \"\"\"\n\n sarif: Dict[str, Any] = {\n \"$schema\": \"https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json\",\n \"version\": \"2.1.0\",\n \"runs\": [\n {\n \"tool\": {\n \"driver\": {\n \"name\": \"Slither\",\n \"informationUri\": \"https://github.com/crytic/slither\",\n \"version\": require(\"slither-analyzer\")[0].version,\n \"rules\": [],\n }\n },\n \"results\": [],\n }\n ],\n }\n\n for detector in results.get(\"detectors\", []):\n _output_result_to_sarif(detector, detectors_classes, sarif)\n\n if filename == \"-\":\n filename = None\n\n # Determine if we should output to stdout\n if filename is None:\n # Write json to console\n print(json.dumps(sarif))\n else:\n # Write json to file\n if os.path.isfile(filename):\n logger.info(yellow(f\"{filename} exists already, the overwrite is prevented\"))\n else:\n with open(filename, \"w\", encoding=\"utf8\") as f:\n json.dump(sarif, f, indent=2)\n\n\n# https://docs.python.org/3/library/zipfile.html#zipfile-objects\nZIP_TYPES_ACCEPTED = {\n \"lzma\": zipfile.ZIP_LZMA,\n \"stored\": zipfile.ZIP_STORED,\n \"deflated\": zipfile.ZIP_DEFLATED,\n \"bzip2\": zipfile.ZIP_BZIP2,\n}\n\n\ndef output_to_zip(filename: str, error: Optional[str], results: Dict, zip_type: str = \"lzma\"):\n \"\"\"\n Output the results to a zip\n The file in the zip is named slither_results.json\n Note: the json file will not have indentation, as a result the resulting json file will be smaller\n :param zip_type:\n :param filename:\n :param error:\n :param results:\n :return:\n \"\"\"\n json_result = {\"success\": error is None, \"error\": error, \"results\": results}\n if os.path.isfile(filename):\n logger.info(yellow(f\"{filename} exists already, the overwrite is prevented\"))\n else:\n with ZipFile(\n filename,\n \"w\",\n compression=ZIP_TYPES_ACCEPTED.get(zip_type, zipfile.ZIP_LZMA),\n ) as file_desc:\n file_desc.writestr(\"slither_results.json\", json.dumps(json_result).encode(\"utf8\"))\n\n\n# endregion\n###################################################################################\n###################################################################################\n# region Json 
generation\n###################################################################################\n###################################################################################\n\n\ndef _convert_to_description(d):\n if isinstance(d, str):\n return d\n\n if not isinstance(d, SourceMapping):\n raise SlitherError(f\"{d} does not inherit from SourceMapping, conversion impossible\")\n\n if isinstance(d, Node):\n if d.expression:\n return f\"{d.expression} ({d.source_mapping_str})\"\n return f\"{str(d)} ({d.source_mapping_str})\"\n\n if hasattr(d, \"canonical_name\"):\n return f\"{d.canonical_name} ({d.source_mapping_str})\"\n\n if hasattr(d, \"name\"):\n return f\"{d.name} ({d.source_mapping_str})\"\n\n raise SlitherError(f\"{type(d)} cannot be converted (no name, or canonical_name\")\n\n\ndef _convert_to_markdown(d, markdown_root):\n if isinstance(d, str):\n return d\n\n if not isinstance(d, SourceMapping):\n raise SlitherError(f\"{d} does not inherit from SourceMapping, conversion impossible\")\n\n if isinstance(d, Node):\n if d.expression:\n return f\"[{d.expression}]({d.source_mapping_to_markdown(markdown_root)})\"\n return f\"[{str(d)}]({d.source_mapping_to_markdown(markdown_root)})\"\n\n if hasattr(d, \"canonical_name\"):\n return f\"[{d.canonical_name}]({d.source_mapping_to_markdown(markdown_root)})\"\n\n if hasattr(d, \"name\"):\n return f\"[{d.name}]({d.source_mapping_to_markdown(markdown_root)})\"\n\n raise SlitherError(f\"{type(d)} cannot be converted (no name, or canonical_name\")\n\n\ndef _convert_to_id(d):\n \"\"\"\n Id keeps the source mapping of the node, otherwise we risk to consider two different node as the same\n :param d:\n :return:\n \"\"\"\n if isinstance(d, str):\n return d\n\n if not isinstance(d, SourceMapping):\n raise SlitherError(f\"{d} does not inherit from SourceMapping, conversion impossible\")\n\n if isinstance(d, Node):\n if d.expression:\n return f\"{d.expression} ({d.source_mapping_str})\"\n return f\"{str(d)} ({d.source_mapping_str})\"\n\n if isinstance(d, Pragma):\n return f\"{d} ({d.source_mapping_str})\"\n\n if hasattr(d, \"canonical_name\"):\n return f\"{d.canonical_name}\"\n\n if hasattr(d, \"name\"):\n return f\"{d.name}\"\n\n raise SlitherError(f\"{type(d)} cannot be converted (no name, or canonical_name\")\n\n\n# endregion\n###################################################################################\n###################################################################################\n# region Internal functions\n###################################################################################\n###################################################################################\n\n\ndef _create_base_element(\n custom_type, name, source_mapping, type_specific_fields=None, additional_fields=None\n):\n if additional_fields is None:\n additional_fields = {}\n if type_specific_fields is None:\n type_specific_fields = {}\n element = {\"type\": custom_type, \"name\": name, \"source_mapping\": source_mapping}\n if type_specific_fields:\n element[\"type_specific_fields\"] = type_specific_fields\n if additional_fields:\n element[\"additional_fields\"] = additional_fields\n return element\n\n\ndef _create_parent_element(element):\n # pylint: disable=import-outside-toplevel\n from slither.core.children.child_contract import ChildContract\n from slither.core.children.child_function import ChildFunction\n from slither.core.children.child_inheritance import ChildInheritance\n\n if isinstance(element, ChildInheritance):\n if element.contract_declarer:\n 
contract = Output(\"\")\n contract.add_contract(element.contract_declarer)\n return contract.data[\"elements\"][0]\n elif isinstance(element, ChildContract):\n if element.contract:\n contract = Output(\"\")\n contract.add_contract(element.contract)\n return contract.data[\"elements\"][0]\n elif isinstance(element, ChildFunction):\n if element.function:\n function = Output(\"\")\n function.add_function(element.function)\n return function.data[\"elements\"][0]\n return None\n\n\nSupportedOutput = Union[Variable, Contract, Function, Enum, Event, Structure, Pragma, Node]\nAllSupportedOutput = Union[str, SupportedOutput]\n\n\nclass Output:\n def __init__(\n self,\n info_: Union[str, List[Union[str, SupportedOutput]]],\n additional_fields: Optional[Dict] = None,\n markdown_root=\"\",\n standard_format=True,\n ):\n if additional_fields is None:\n additional_fields = {}\n\n # Allow info to be a string to simplify the API\n info: List[Union[str, SupportedOutput]]\n if isinstance(info_, str):\n info = [info_]\n else:\n info = info_\n\n self._data: Dict[str, Any] = OrderedDict()\n self._data[\"elements\"] = []\n self._data[\"description\"] = \"\".join(_convert_to_description(d) for d in info)\n self._data[\"markdown\"] = \"\".join(_convert_to_markdown(d, markdown_root) for d in info)\n self._data[\"first_markdown_element\"] = \"\"\n self._markdown_root = markdown_root\n\n id_txt = \"\".join(_convert_to_id(d) for d in info)\n self._data[\"id\"] = hashlib.sha3_256(id_txt.encode(\"utf-8\")).hexdigest()\n\n if standard_format:\n to_add = [i for i in info if not isinstance(i, str)]\n\n for add in to_add:\n self.add(add)\n\n if additional_fields:\n self._data[\"additional_fields\"] = additional_fields\n\n def add(self, add: SupportedOutput, additional_fields: Optional[Dict] = None):\n if not self._data[\"first_markdown_element\"]:\n self._data[\"first_markdown_element\"] = add.source_mapping_to_markdown(\n self._markdown_root\n )\n if isinstance(add, Variable):\n self.add_variable(add, additional_fields=additional_fields)\n elif isinstance(add, Contract):\n self.add_contract(add, additional_fields=additional_fields)\n elif isinstance(add, Function):\n self.add_function(add, additional_fields=additional_fields)\n elif isinstance(add, Enum):\n self.add_enum(add, additional_fields=additional_fields)\n elif isinstance(add, Event):\n self.add_event(add, additional_fields=additional_fields)\n elif isinstance(add, Structure):\n self.add_struct(add, additional_fields=additional_fields)\n elif isinstance(add, Pragma):\n self.add_pragma(add, additional_fields=additional_fields)\n elif isinstance(add, Node):\n self.add_node(add, additional_fields=additional_fields)\n else:\n raise SlitherError(f\"Impossible to add {type(add)} to the json\")\n\n @property\n def data(self) -> Dict:\n return self._data\n\n @property\n def elements(self) -> List[Dict]:\n return self._data[\"elements\"]\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Variables\n ###################################################################################\n ###################################################################################\n\n def add_variable(self, variable: Variable, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"parent\": _create_parent_element(variable)}\n element = _create_base_element(\n 
\"variable\",\n variable.name,\n variable.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n def add_variables(self, variables: List[Variable]):\n for variable in sorted(variables, key=lambda x: x.name):\n self.add_variable(variable)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Contract\n ###################################################################################\n ###################################################################################\n\n def add_contract(self, contract: Contract, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n element = _create_base_element(\n \"contract\", contract.name, contract.source_mapping, {}, additional_fields\n )\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Functions\n ###################################################################################\n ###################################################################################\n\n def add_function(self, function: Function, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\n \"parent\": _create_parent_element(function),\n \"signature\": function.full_name,\n }\n element = _create_base_element(\n \"function\",\n function.name,\n function.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n def add_functions(self, functions: List[Function], additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n for function in sorted(functions, key=lambda x: x.name):\n self.add_function(function, additional_fields)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Enum\n ###################################################################################\n ###################################################################################\n\n def add_enum(self, enum: Enum, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"parent\": _create_parent_element(enum)}\n element = _create_base_element(\n \"enum\",\n enum.name,\n enum.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Structures\n ###################################################################################\n ###################################################################################\n\n def add_struct(self, struct: Structure, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"parent\": _create_parent_element(struct)}\n element = _create_base_element(\n \"struct\",\n struct.name,\n struct.source_mapping,\n type_specific_fields,\n 
additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Events\n ###################################################################################\n ###################################################################################\n\n def add_event(self, event: Event, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\n \"parent\": _create_parent_element(event),\n \"signature\": event.full_name,\n }\n element = _create_base_element(\n \"event\",\n event.name,\n event.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Nodes\n ###################################################################################\n ###################################################################################\n\n def add_node(self, node: Node, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\n \"parent\": _create_parent_element(node),\n }\n node_name = str(node.expression) if node.expression else \"\"\n element = _create_base_element(\n \"node\",\n node_name,\n node.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n def add_nodes(self, nodes: List[Node]):\n for node in sorted(nodes, key=lambda x: x.node_id):\n self.add_node(node)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Pragma\n ###################################################################################\n ###################################################################################\n\n def add_pragma(self, pragma: Pragma, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"directive\": pragma.directive}\n element = _create_base_element(\n \"pragma\",\n pragma.version,\n pragma.source_mapping,\n type_specific_fields,\n additional_fields,\n )\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region File\n ###################################################################################\n ###################################################################################\n\n def add_file(self, filename: str, content: str, additional_fields: Optional[Dict] = None):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"filename\": filename, \"content\": content}\n element = _create_base_element(\"file\", type_specific_fields, additional_fields)\n\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Pretty Table\n 
###################################################################################\n ###################################################################################\n\n def add_pretty_table(\n self,\n content: MyPrettyTable,\n name: str,\n additional_fields: Optional[Dict] = None,\n ):\n if additional_fields is None:\n additional_fields = {}\n type_specific_fields = {\"content\": content.to_json(), \"name\": name}\n element = _create_base_element(\"pretty_table\", type_specific_fields, additional_fields)\n\n self._data[\"elements\"].append(element)\n\n # endregion\n ###################################################################################\n ###################################################################################\n # region Others\n ###################################################################################\n ###################################################################################\n\n def add_other(\n self,\n name: str,\n source_mapping,\n compilation_unit: \"SlitherCompilationUnit\",\n additional_fields: Optional[Dict] = None,\n ):\n # If this a tuple with (filename, start, end), convert it to a source mapping.\n if additional_fields is None:\n additional_fields = {}\n if isinstance(source_mapping, tuple):\n # Parse the source id\n (filename, start, end) = source_mapping\n source_id = next(\n (\n source_unit_id\n for (\n source_unit_id,\n source_unit_filename,\n ) in compilation_unit.source_units.items()\n if source_unit_filename == filename\n ),\n -1,\n )\n\n # Convert to a source mapping string\n source_mapping = f\"{start}:{end}:{source_id}\"\n\n # If this is a source mapping string, parse it.\n if isinstance(source_mapping, str):\n source_mapping_str = source_mapping\n source_mapping = SourceMapping()\n source_mapping.set_offset(source_mapping_str, compilation_unit)\n\n # If this is a source mapping object, get the underlying source mapping dictionary\n if isinstance(source_mapping, SourceMapping):\n source_mapping = source_mapping.source_mapping\n\n # Create the underlying element and add it to our resulting json\n element = _create_base_element(\"other\", name, source_mapping, {}, additional_fields)\n self._data[\"elements\"].append(element)\n", "path": "slither/utils/output.py" } ]
diff --git a/slither/utils/output.py b/slither/utils/output.py index 16c9f5bbf0..6ee59b4c6f 100644 --- a/slither/utils/output.py +++ b/slither/utils/output.py @@ -160,7 +160,7 @@ def output_to_sarif( ], } - for detector in results["detectors"]: + for detector in results.get("detectors", []): _output_result_to_sarif(detector, detectors_classes, sarif) if filename == "-":
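The slither fix above is a one-line change in `output_to_sarif`: direct indexing of `results["detectors"]` is replaced with `results.get("detectors", [])`, so SARIF generation no longer fails when a run produced no detector findings. A minimal, self-contained sketch of the difference — the `results` value here is illustrative and not taken from slither:

```python
# Why the diff swaps indexing for dict.get(): indexing a missing key raises
# KeyError, while .get() with a default lets the SARIF loop run on nothing.
results = {}  # e.g. a run whose output contains no "detectors" key

try:
    detectors = results["detectors"]  # old behaviour: raises KeyError
except KeyError:
    detectors = None
assert detectors is None

assert results.get("detectors", []) == []  # new behaviour: safe empty default

for detector in results.get("detectors", []):
    pass  # the loop body is simply skipped when there are no findings
```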
facebookresearch__ParlAI-4892
OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained. **Bug description** Please enter a clear and concise description of what the bug is. When I execute: ```sh $python -m parlai eval_model --task fromfile:parlaiformat\ --fromfile_datapath "${test_set_path}" \ -mf zoo:saferdialogues/model\ -bs 1\ --world-logs $test_set_path.SafeRDialog_parlai.jsonl\ --no-cuda ``` It report: ```sh 16:13:53 | Overriding opt["task"] to fromfile:parlaiformat (previously: internal:safety_failures_with_recovery,internal:bst_sf_modified) 16:13:53 | Overriding opt["no_cuda"] to True (previously: False) >>>using / style agent path >>>finally module name: parlai.agents.transformer.generator 16:13:53 | loading dictionary from /home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/data/models/saferdialogues/model.dict 16:13:53 | num words = 8008 Traceback (most recent call last): File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py", line 194, in _run_module_as_main return _run_code(code, main_globals, None, File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py", line 87, in _run_code exec(code, run_globals) File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py", line 18, in <module> main() File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py", line 14, in main superscript_main() File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py", line 325, in superscript_main return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser) File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py", line 108, in _run_from_parser_and_opt return script.run() File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py", line 265, in run return eval_model(self.opt) File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py", line 233, in eval_model agent = create_agent(opt, requireModelExists=True) File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py", line 468, in create_agent model = create_agent_from_opt_file(opt) File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py", line 421, in create_agent_from_opt_file return model_class(opt_from_file) File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_generator_agent.py", line 462, in __init__ super().__init__(opt, shared) File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py", line 783, in __init__ self.dict = self.build_dictionary() File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py", line 862, in build_dictionary d = self.dictionary_class()(self.opt) File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/dict.py", line 322, in __init__ self.bpe = bpe_factory(opt, shared) File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py", line 68, in bpe_factory bpe_helper = HuggingFaceBpeHelper(opt, shared) File 
"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py", line 841, in __init__ raise IOError( OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained. ``` and the parlai version is: `1.6.0`. **Reproduction steps** Enter steps to reproduce the behavior. **Expected behavior** Give a clear and concise description of what you expected to happen. **Logs** Please paste the command line output: ``` Output goes here ``` **Additional context** Add any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)
[ { "content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nBlender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.\n\"\"\"\n\nfrom parlai.core.build_data import download_models\n\n\ndef download(datapath):\n opt = {'datapath': datapath}\n version = 'v0.1'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n fnames,\n model_folder='saferdialogues',\n version=version,\n use_model_type=False,\n )\n", "path": "parlai/zoo/saferdialogues/build.py" } ]
[ { "content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nBlender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.\n\"\"\"\n\nfrom parlai.core.build_data import download_models\n\n\ndef download(datapath):\n opt = {'datapath': datapath}\n version = 'v0.2'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n fnames,\n model_folder='saferdialogues',\n version=version,\n use_model_type=False,\n )\n", "path": "parlai/zoo/saferdialogues/build.py" } ]
diff --git a/parlai/zoo/saferdialogues/build.py b/parlai/zoo/saferdialogues/build.py index 0b2ab421822..ed8e5946a86 100644 --- a/parlai/zoo/saferdialogues/build.py +++ b/parlai/zoo/saferdialogues/build.py @@ -13,7 +13,7 @@ def download(datapath): opt = {'datapath': datapath} - version = 'v0.1' + version = 'v0.2' fnames = [f'models_{version}.tar.gz'] download_models( opt,
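The ParlAI fix above only bumps the zoo model version from `v0.1` to `v0.2`, which changes the archive requested by `download_models` to `models_v0.2.tar.gz`; presumably the re-packaged archive ships dictionary/option files that no longer reference the absolute `/checkpoint/...` vocab path from the original training setup. A hypothetical usage sketch, assuming a local ParlAI data directory — the path below is a placeholder, not from the source:

```python
# Hypothetical re-download sketch after the version bump above.
# The data directory is an assumption; point it at your own ParlAI datapath.
from parlai.zoo.saferdialogues.build import download

datapath = "/path/to/ParlAI/data"  # placeholder location

# download() builds opt = {'datapath': datapath}, composes
# fnames = ["models_v0.2.tar.gz"], and calls download_models(), which should
# fetch the re-packaged archive into the saferdialogues model folder.
download(datapath)
```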
pypa__pip-9333
Move slow check_manifest out of .pre-commit-config.yaml The check-manifest pre-commit hook is by far the slowest of the checks, in particular when committing only a few files. When pre-commit is installed locally as a pre-commit hook, it tends to slow down the development flow significantly. Could we move it out of the default pre-commit config to a regular CI check?
[ { "content": "\"\"\"Automation using nox.\n\"\"\"\n\n# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nimport glob\nimport os\nimport shutil\nimport sys\nfrom pathlib import Path\n\nimport nox\n\nsys.path.append(\".\")\nfrom tools.automation import release # isort:skip # noqa\nsys.path.pop()\n\nnox.options.reuse_existing_virtualenvs = True\nnox.options.sessions = [\"lint\"]\n\nLOCATIONS = {\n \"common-wheels\": \"tests/data/common_wheels\",\n \"protected-pip\": \"tools/tox_pip.py\",\n}\nREQUIREMENTS = {\n \"docs\": \"tools/requirements/docs.txt\",\n \"tests\": \"tools/requirements/tests.txt\",\n \"common-wheels\": \"tools/requirements/tests-common_wheels.txt\",\n}\n\nAUTHORS_FILE = \"AUTHORS.txt\"\nVERSION_FILE = \"src/pip/__init__.py\"\n\n\ndef run_with_protected_pip(session, *arguments):\n \"\"\"Do a session.run(\"pip\", *arguments), using a \"protected\" pip.\n\n This invokes a wrapper script, that forwards calls to original virtualenv\n (stable) version, and not the code being tested. This ensures pip being\n used is not the code being tested.\n \"\"\"\n env = {\"VIRTUAL_ENV\": session.virtualenv.location}\n\n command = (\"python\", LOCATIONS[\"protected-pip\"]) + arguments\n kwargs = {\"env\": env, \"silent\": True}\n session.run(*command, **kwargs)\n\n\ndef should_update_common_wheels():\n # If the cache hasn't been created, create it.\n if not os.path.exists(LOCATIONS[\"common-wheels\"]):\n return True\n\n # If the requirements was updated after cache, we'll repopulate it.\n cache_last_populated_at = os.path.getmtime(LOCATIONS[\"common-wheels\"])\n requirements_updated_at = os.path.getmtime(REQUIREMENTS[\"common-wheels\"])\n need_to_repopulate = requirements_updated_at > cache_last_populated_at\n\n # Clear the stale cache.\n if need_to_repopulate:\n shutil.rmtree(LOCATIONS[\"common-wheels\"], ignore_errors=True)\n\n return need_to_repopulate\n\n\n# -----------------------------------------------------------------------------\n# Development Commands\n# These are currently prototypes to evaluate whether we want to switch over\n# completely to nox for all our automation. 
Contributors should prefer using\n# `tox -e ...` until this note is removed.\n# -----------------------------------------------------------------------------\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\", \"pypy3\"])\ndef test(session):\n # Get the common wheels.\n if should_update_common_wheels():\n run_with_protected_pip(\n session,\n \"wheel\",\n \"-w\", LOCATIONS[\"common-wheels\"],\n \"-r\", REQUIREMENTS[\"common-wheels\"],\n )\n else:\n msg = (\n \"Re-using existing common-wheels at {}.\"\n .format(LOCATIONS[\"common-wheels\"])\n )\n session.log(msg)\n\n # Build source distribution\n sdist_dir = os.path.join(session.virtualenv.location, \"sdist\")\n if os.path.exists(sdist_dir):\n shutil.rmtree(sdist_dir, ignore_errors=True)\n session.run(\n \"python\", \"setup.py\", \"sdist\",\n \"--formats=zip\", \"--dist-dir\", sdist_dir,\n silent=True,\n )\n generated_files = os.listdir(sdist_dir)\n assert len(generated_files) == 1\n generated_sdist = os.path.join(sdist_dir, generated_files[0])\n\n # Install source distribution\n run_with_protected_pip(session, \"install\", generated_sdist)\n\n # Install test dependencies\n run_with_protected_pip(session, \"install\", \"-r\", REQUIREMENTS[\"tests\"])\n\n # Parallelize tests as much as possible, by default.\n arguments = session.posargs or [\"-n\", \"auto\"]\n\n # Run the tests\n # LC_CTYPE is set to get UTF-8 output inside of the subprocesses that our\n # tests use.\n session.run(\"pytest\", *arguments, env={\"LC_CTYPE\": \"en_US.UTF-8\"})\n\n\[email protected]\ndef docs(session):\n session.install(\"-e\", \".\")\n session.install(\"-r\", REQUIREMENTS[\"docs\"])\n\n def get_sphinx_build_command(kind):\n # Having the conf.py in the docs/html is weird but needed because we\n # can not use a different configuration directory vs source directory\n # on RTD currently. 
So, we'll pass \"-c docs/html\" here.\n # See https://github.com/rtfd/readthedocs.org/issues/1543.\n return [\n \"sphinx-build\",\n \"-W\",\n \"-c\", \"docs/html\", # see note above\n \"-d\", \"docs/build/doctrees/\" + kind,\n \"-b\", kind,\n \"docs/\" + kind,\n \"docs/build/\" + kind,\n ]\n\n session.run(*get_sphinx_build_command(\"html\"))\n session.run(*get_sphinx_build_command(\"man\"))\n\n\[email protected]\ndef lint(session):\n session.install(\"pre-commit\")\n\n if session.posargs:\n args = session.posargs + [\"--all-files\"]\n else:\n args = [\"--all-files\", \"--show-diff-on-failure\"]\n\n session.run(\"pre-commit\", \"run\", *args)\n\n\[email protected]\ndef vendoring(session):\n session.install(\"vendoring>=0.3.0\")\n\n if \"--upgrade\" not in session.posargs:\n session.run(\"vendoring\", \"sync\", \".\", \"-v\")\n return\n\n def pinned_requirements(path):\n for line in path.read_text().splitlines():\n one, two = line.split(\"==\", 1)\n name = one.strip()\n version = two.split(\"#\")[0].strip()\n yield name, version\n\n vendor_txt = Path(\"src/pip/_vendor/vendor.txt\")\n for name, old_version in pinned_requirements(vendor_txt):\n if name == \"setuptools\":\n continue\n\n # update requirements.txt\n session.run(\"vendoring\", \"update\", \".\", name)\n\n # get the updated version\n new_version = old_version\n for inner_name, inner_version in pinned_requirements(vendor_txt):\n if inner_name == name:\n # this is a dedicated assignment, to make flake8 happy\n new_version = inner_version\n break\n else:\n session.error(f\"Could not find {name} in {vendor_txt}\")\n\n # check if the version changed.\n if new_version == old_version:\n continue # no change, nothing more to do here.\n\n # synchronize the contents\n session.run(\"vendoring\", \"sync\", \".\")\n\n # Determine the correct message\n message = f\"Upgrade {name} to {new_version}\"\n\n # Write our news fragment\n news_file = Path(\"news\") / (name + \".vendor.rst\")\n news_file.write_text(message + \"\\n\") # \"\\n\" appeases end-of-line-fixer\n\n # Commit the changes\n release.commit_file(session, \".\", message=message)\n\n\n# -----------------------------------------------------------------------------\n# Release Commands\n# -----------------------------------------------------------------------------\[email protected](name=\"prepare-release\")\ndef prepare_release(session):\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s prepare-release -- <version>\")\n\n session.log(\"# Ensure nothing is staged\")\n if release.modified_files_in_git(\"--staged\"):\n session.error(\"There are files staged in git\")\n\n session.log(f\"# Updating {AUTHORS_FILE}\")\n release.generate_authors(AUTHORS_FILE)\n if release.modified_files_in_git():\n release.commit_file(\n session, AUTHORS_FILE, message=f\"Update {AUTHORS_FILE}\",\n )\n else:\n session.log(f\"# No changes to {AUTHORS_FILE}\")\n\n session.log(\"# Generating NEWS\")\n release.generate_news(session, version)\n\n session.log(f\"# Bumping for release {version}\")\n release.update_version_file(version, VERSION_FILE)\n release.commit_file(session, VERSION_FILE, message=\"Bump for release\")\n\n session.log(\"# Tagging release\")\n release.create_git_tag(session, version, message=f\"Release {version}\")\n\n session.log(\"# Bumping for development\")\n next_dev_version = release.get_next_development_version(version)\n release.update_version_file(next_dev_version, VERSION_FILE)\n release.commit_file(session, VERSION_FILE, message=\"Bump 
for development\")\n\n\[email protected](name=\"build-release\")\ndef build_release(session):\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s build-release -- YY.N[.P]\")\n\n session.log(\"# Ensure no files in dist/\")\n if release.have_files_in_folder(\"dist\"):\n session.error(\n \"There are files in dist/. Remove them and try again. \"\n \"You can use `git clean -fxdi -- dist` command to do this\"\n )\n\n session.log(\"# Install dependencies\")\n session.install(\"setuptools\", \"wheel\", \"twine\")\n\n with release.isolated_temporary_checkout(session, version) as build_dir:\n session.log(\n \"# Start the build in an isolated, \"\n f\"temporary Git checkout at {build_dir!s}\",\n )\n with release.workdir(session, build_dir):\n tmp_dists = build_dists(session)\n\n tmp_dist_paths = (build_dir / p for p in tmp_dists)\n session.log(f\"# Copying dists from {build_dir}\")\n os.makedirs('dist', exist_ok=True)\n for dist, final in zip(tmp_dist_paths, tmp_dists):\n session.log(f\"# Copying {dist} to {final}\")\n shutil.copy(dist, final)\n\n\ndef build_dists(session):\n \"\"\"Return dists with valid metadata.\"\"\"\n session.log(\n \"# Check if there's any Git-untracked files before building the wheel\",\n )\n\n has_forbidden_git_untracked_files = any(\n # Don't report the environment this session is running in\n not untracked_file.startswith('.nox/build-release/')\n for untracked_file in release.get_git_untracked_files()\n )\n if has_forbidden_git_untracked_files:\n session.error(\n \"There are untracked files in the working directory. \"\n \"Remove them and try again\",\n )\n\n session.log(\"# Build distributions\")\n session.run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", silent=True)\n produced_dists = glob.glob(\"dist/*\")\n\n session.log(f\"# Verify distributions: {', '.join(produced_dists)}\")\n session.run(\"twine\", \"check\", *produced_dists, silent=True)\n\n return produced_dists\n\n\[email protected](name=\"upload-release\")\ndef upload_release(session):\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s upload-release -- YY.N[.P]\")\n\n session.log(\"# Install dependencies\")\n session.install(\"twine\")\n\n distribution_files = glob.glob(\"dist/*\")\n session.log(f\"# Distribution files: {distribution_files}\")\n\n # Sanity check: Make sure there's 2 distribution files.\n count = len(distribution_files)\n if count != 2:\n session.error(\n f\"Expected 2 distribution files for upload, got {count}. \"\n f\"Remove dist/ and run 'nox -s build-release -- {version}'\"\n )\n # Sanity check: Make sure the files are correctly named.\n distfile_names = map(os.path.basename, distribution_files)\n expected_distribution_files = [\n f\"pip-{version}-py2.py3-none-any.whl\",\n f\"pip-{version}.tar.gz\",\n ]\n if sorted(distfile_names) != sorted(expected_distribution_files):\n session.error(\n f\"Distribution files do not seem to be for {version} release.\"\n )\n\n session.log(\"# Upload distributions\")\n session.run(\"twine\", \"upload\", *distribution_files)\n", "path": "noxfile.py" } ]
[ { "content": "\"\"\"Automation using nox.\n\"\"\"\n\n# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nimport glob\nimport os\nimport shutil\nimport sys\nfrom pathlib import Path\n\nimport nox\n\nsys.path.append(\".\")\nfrom tools.automation import release # isort:skip # noqa\nsys.path.pop()\n\nnox.options.reuse_existing_virtualenvs = True\nnox.options.sessions = [\"lint\"]\n\nLOCATIONS = {\n \"common-wheels\": \"tests/data/common_wheels\",\n \"protected-pip\": \"tools/tox_pip.py\",\n}\nREQUIREMENTS = {\n \"docs\": \"tools/requirements/docs.txt\",\n \"tests\": \"tools/requirements/tests.txt\",\n \"common-wheels\": \"tools/requirements/tests-common_wheels.txt\",\n}\n\nAUTHORS_FILE = \"AUTHORS.txt\"\nVERSION_FILE = \"src/pip/__init__.py\"\n\n\ndef run_with_protected_pip(session, *arguments):\n \"\"\"Do a session.run(\"pip\", *arguments), using a \"protected\" pip.\n\n This invokes a wrapper script, that forwards calls to original virtualenv\n (stable) version, and not the code being tested. This ensures pip being\n used is not the code being tested.\n \"\"\"\n env = {\"VIRTUAL_ENV\": session.virtualenv.location}\n\n command = (\"python\", LOCATIONS[\"protected-pip\"]) + arguments\n kwargs = {\"env\": env, \"silent\": True}\n session.run(*command, **kwargs)\n\n\ndef should_update_common_wheels():\n # If the cache hasn't been created, create it.\n if not os.path.exists(LOCATIONS[\"common-wheels\"]):\n return True\n\n # If the requirements was updated after cache, we'll repopulate it.\n cache_last_populated_at = os.path.getmtime(LOCATIONS[\"common-wheels\"])\n requirements_updated_at = os.path.getmtime(REQUIREMENTS[\"common-wheels\"])\n need_to_repopulate = requirements_updated_at > cache_last_populated_at\n\n # Clear the stale cache.\n if need_to_repopulate:\n shutil.rmtree(LOCATIONS[\"common-wheels\"], ignore_errors=True)\n\n return need_to_repopulate\n\n\n# -----------------------------------------------------------------------------\n# Development Commands\n# These are currently prototypes to evaluate whether we want to switch over\n# completely to nox for all our automation. 
Contributors should prefer using\n# `tox -e ...` until this note is removed.\n# -----------------------------------------------------------------------------\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\", \"3.9\", \"pypy\", \"pypy3\"])\ndef test(session):\n # Get the common wheels.\n if should_update_common_wheels():\n run_with_protected_pip(\n session,\n \"wheel\",\n \"-w\", LOCATIONS[\"common-wheels\"],\n \"-r\", REQUIREMENTS[\"common-wheels\"],\n )\n else:\n msg = (\n \"Re-using existing common-wheels at {}.\"\n .format(LOCATIONS[\"common-wheels\"])\n )\n session.log(msg)\n\n # Build source distribution\n sdist_dir = os.path.join(session.virtualenv.location, \"sdist\")\n if os.path.exists(sdist_dir):\n shutil.rmtree(sdist_dir, ignore_errors=True)\n session.run(\n \"python\", \"setup.py\", \"sdist\",\n \"--formats=zip\", \"--dist-dir\", sdist_dir,\n silent=True,\n )\n generated_files = os.listdir(sdist_dir)\n assert len(generated_files) == 1\n generated_sdist = os.path.join(sdist_dir, generated_files[0])\n\n # Install source distribution\n run_with_protected_pip(session, \"install\", generated_sdist)\n\n # Install test dependencies\n run_with_protected_pip(session, \"install\", \"-r\", REQUIREMENTS[\"tests\"])\n\n # Parallelize tests as much as possible, by default.\n arguments = session.posargs or [\"-n\", \"auto\"]\n\n # Run the tests\n # LC_CTYPE is set to get UTF-8 output inside of the subprocesses that our\n # tests use.\n session.run(\"pytest\", *arguments, env={\"LC_CTYPE\": \"en_US.UTF-8\"})\n\n\[email protected]\ndef docs(session):\n session.install(\"-e\", \".\")\n session.install(\"-r\", REQUIREMENTS[\"docs\"])\n\n def get_sphinx_build_command(kind):\n # Having the conf.py in the docs/html is weird but needed because we\n # can not use a different configuration directory vs source directory\n # on RTD currently. 
So, we'll pass \"-c docs/html\" here.\n # See https://github.com/rtfd/readthedocs.org/issues/1543.\n return [\n \"sphinx-build\",\n \"-W\",\n \"-c\", \"docs/html\", # see note above\n \"-d\", \"docs/build/doctrees/\" + kind,\n \"-b\", kind,\n \"docs/\" + kind,\n \"docs/build/\" + kind,\n ]\n\n session.run(*get_sphinx_build_command(\"html\"))\n session.run(*get_sphinx_build_command(\"man\"))\n\n\[email protected]\ndef lint(session):\n session.install(\"pre-commit\")\n\n if session.posargs:\n args = session.posargs + [\"--all-files\"]\n else:\n args = [\"--all-files\", \"--show-diff-on-failure\"]\n\n session.run(\"pre-commit\", \"run\", *args)\n session.run(\n \"pre-commit\", \"run\", \"-c\", \".pre-commit-config-slow.yaml\", *args\n )\n\n\[email protected]\ndef vendoring(session):\n session.install(\"vendoring>=0.3.0\")\n\n if \"--upgrade\" not in session.posargs:\n session.run(\"vendoring\", \"sync\", \".\", \"-v\")\n return\n\n def pinned_requirements(path):\n for line in path.read_text().splitlines():\n one, two = line.split(\"==\", 1)\n name = one.strip()\n version = two.split(\"#\")[0].strip()\n yield name, version\n\n vendor_txt = Path(\"src/pip/_vendor/vendor.txt\")\n for name, old_version in pinned_requirements(vendor_txt):\n if name == \"setuptools\":\n continue\n\n # update requirements.txt\n session.run(\"vendoring\", \"update\", \".\", name)\n\n # get the updated version\n new_version = old_version\n for inner_name, inner_version in pinned_requirements(vendor_txt):\n if inner_name == name:\n # this is a dedicated assignment, to make flake8 happy\n new_version = inner_version\n break\n else:\n session.error(f\"Could not find {name} in {vendor_txt}\")\n\n # check if the version changed.\n if new_version == old_version:\n continue # no change, nothing more to do here.\n\n # synchronize the contents\n session.run(\"vendoring\", \"sync\", \".\")\n\n # Determine the correct message\n message = f\"Upgrade {name} to {new_version}\"\n\n # Write our news fragment\n news_file = Path(\"news\") / (name + \".vendor.rst\")\n news_file.write_text(message + \"\\n\") # \"\\n\" appeases end-of-line-fixer\n\n # Commit the changes\n release.commit_file(session, \".\", message=message)\n\n\n# -----------------------------------------------------------------------------\n# Release Commands\n# -----------------------------------------------------------------------------\[email protected](name=\"prepare-release\")\ndef prepare_release(session):\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s prepare-release -- <version>\")\n\n session.log(\"# Ensure nothing is staged\")\n if release.modified_files_in_git(\"--staged\"):\n session.error(\"There are files staged in git\")\n\n session.log(f\"# Updating {AUTHORS_FILE}\")\n release.generate_authors(AUTHORS_FILE)\n if release.modified_files_in_git():\n release.commit_file(\n session, AUTHORS_FILE, message=f\"Update {AUTHORS_FILE}\",\n )\n else:\n session.log(f\"# No changes to {AUTHORS_FILE}\")\n\n session.log(\"# Generating NEWS\")\n release.generate_news(session, version)\n\n session.log(f\"# Bumping for release {version}\")\n release.update_version_file(version, VERSION_FILE)\n release.commit_file(session, VERSION_FILE, message=\"Bump for release\")\n\n session.log(\"# Tagging release\")\n release.create_git_tag(session, version, message=f\"Release {version}\")\n\n session.log(\"# Bumping for development\")\n next_dev_version = release.get_next_development_version(version)\n 
release.update_version_file(next_dev_version, VERSION_FILE)\n release.commit_file(session, VERSION_FILE, message=\"Bump for development\")\n\n\[email protected](name=\"build-release\")\ndef build_release(session):\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s build-release -- YY.N[.P]\")\n\n session.log(\"# Ensure no files in dist/\")\n if release.have_files_in_folder(\"dist\"):\n session.error(\n \"There are files in dist/. Remove them and try again. \"\n \"You can use `git clean -fxdi -- dist` command to do this\"\n )\n\n session.log(\"# Install dependencies\")\n session.install(\"setuptools\", \"wheel\", \"twine\")\n\n with release.isolated_temporary_checkout(session, version) as build_dir:\n session.log(\n \"# Start the build in an isolated, \"\n f\"temporary Git checkout at {build_dir!s}\",\n )\n with release.workdir(session, build_dir):\n tmp_dists = build_dists(session)\n\n tmp_dist_paths = (build_dir / p for p in tmp_dists)\n session.log(f\"# Copying dists from {build_dir}\")\n os.makedirs('dist', exist_ok=True)\n for dist, final in zip(tmp_dist_paths, tmp_dists):\n session.log(f\"# Copying {dist} to {final}\")\n shutil.copy(dist, final)\n\n\ndef build_dists(session):\n \"\"\"Return dists with valid metadata.\"\"\"\n session.log(\n \"# Check if there's any Git-untracked files before building the wheel\",\n )\n\n has_forbidden_git_untracked_files = any(\n # Don't report the environment this session is running in\n not untracked_file.startswith('.nox/build-release/')\n for untracked_file in release.get_git_untracked_files()\n )\n if has_forbidden_git_untracked_files:\n session.error(\n \"There are untracked files in the working directory. \"\n \"Remove them and try again\",\n )\n\n session.log(\"# Build distributions\")\n session.run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", silent=True)\n produced_dists = glob.glob(\"dist/*\")\n\n session.log(f\"# Verify distributions: {', '.join(produced_dists)}\")\n session.run(\"twine\", \"check\", *produced_dists, silent=True)\n\n return produced_dists\n\n\[email protected](name=\"upload-release\")\ndef upload_release(session):\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s upload-release -- YY.N[.P]\")\n\n session.log(\"# Install dependencies\")\n session.install(\"twine\")\n\n distribution_files = glob.glob(\"dist/*\")\n session.log(f\"# Distribution files: {distribution_files}\")\n\n # Sanity check: Make sure there's 2 distribution files.\n count = len(distribution_files)\n if count != 2:\n session.error(\n f\"Expected 2 distribution files for upload, got {count}. \"\n f\"Remove dist/ and run 'nox -s build-release -- {version}'\"\n )\n # Sanity check: Make sure the files are correctly named.\n distfile_names = map(os.path.basename, distribution_files)\n expected_distribution_files = [\n f\"pip-{version}-py2.py3-none-any.whl\",\n f\"pip-{version}.tar.gz\",\n ]\n if sorted(distfile_names) != sorted(expected_distribution_files):\n session.error(\n f\"Distribution files do not seem to be for {version} release.\"\n )\n\n session.log(\"# Upload distributions\")\n session.run(\"twine\", \"upload\", *distribution_files)\n", "path": "noxfile.py" } ]
diff --git a/.pre-commit-config-slow.yaml b/.pre-commit-config-slow.yaml
new file mode 100644
index 00000000000..2179c665769
--- /dev/null
+++ b/.pre-commit-config-slow.yaml
@@ -0,0 +1,7 @@
+# Slow pre-commit checks we don't want to run locally with each commit.
+
+repos:
+- repo: https://github.com/mgedmin/check-manifest
+  rev: '0.43'
+  hooks:
+  - id: check-manifest
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 838b1f24ebe..4eadea08c47 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -97,8 +97,3 @@ repos:
       entry: NEWS fragment files must be named *.(process|removal|feature|bugfix|vendor|doc|trivial).rst
       exclude: ^news/(.gitignore|.*\.(process|removal|feature|bugfix|vendor|doc|trivial).rst)
       files: ^news/
-
-- repo: https://github.com/mgedmin/check-manifest
-  rev: '0.43'
-  hooks:
-  - id: check-manifest
diff --git a/MANIFEST.in b/MANIFEST.in
index 24d4553785b..2cf636ce3f7 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -17,6 +17,7 @@ exclude .appveyor.yml
 exclude .travis.yml
 exclude .readthedocs.yml
 exclude .pre-commit-config.yaml
+exclude .pre-commit-config-slow.yaml
 exclude tox.ini
 exclude noxfile.py
diff --git a/noxfile.py b/noxfile.py
index 29e3959e463..372defef513 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -149,6 +149,9 @@ def lint(session):
         args = ["--all-files", "--show-diff-on-failure"]
 
     session.run("pre-commit", "run", *args)
+    session.run(
+        "pre-commit", "run", "-c", ".pre-commit-config-slow.yaml", *args
+    )
 
 
 @nox.session
diff --git a/tox.ini b/tox.ini
index 9c20759af3a..e458e374b50 100644
--- a/tox.ini
+++ b/tox.ini
@@ -64,6 +64,7 @@ commands_pre =
 deps = pre-commit
 commands = pre-commit run [] --all-files --show-diff-on-failure
+    pre-commit run [] -c .pre-commit-config-slow.yaml --all-files --show-diff-on-failure
 
 [testenv:vendoring]
 basepython = python3
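In practice the split means slow hooks such as check-manifest only run when asked for explicitly. A rough equivalent of what the updated lint session now does, written as a standalone script rather than a nox session (the flags and config path simply mirror the diff above; run from a pip checkout with pre-commit installed):

```python
"""Run the fast hooks, then the slow ones, the way the reworked lint session does."""
import subprocess

fast_hooks = ["pre-commit", "run", "--all-files", "--show-diff-on-failure"]
slow_hooks = ["pre-commit", "run", "-c", ".pre-commit-config-slow.yaml",
              "--all-files", "--show-diff-on-failure"]

for command in (fast_hooks, slow_hooks):
    # check=True mirrors nox aborting the session when a hook run fails.
    subprocess.run(command, check=True)
```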
cornellius-gp__gpytorch-871
[Bug] SumBatchLazyTensor size is inconsistent with indices
# 🐛 Bug

I want to apply a KroneckerProductLazyTensor across a batch of `x` lazy tensors, then a SumBatchLazyTensor, then retrieve a specific row and finally evaluate. The code works if I first evaluate `sum_a` and then retrieve the row (which is inefficient), but it gives a `size is inconsistent with indices` error if I retrieve the row first and then evaluate. Interestingly, if I use the same number for dimensions -1 and -2, there is no error.

## To reproduce

** Code snippet to reproduce **
```python
import gpytorch
x = 3
a = torch.rand((x, 5, 2, 3))
lazy_a = gpytorch.lazy.NonLazyTensor(a)
assert lazy_a.shape == torch.Size([3, 5, 2, 3])
prod_a = gpytorch.lazy.KroneckerProductLazyTensor(*lazy_a)
assert prod_a.shape == torch.Size([5, 8, 27])
sum_a = gpytorch.lazy.SumBatchLazyTensor(prod_a)
assert sum_a.shape == torch.Size([8, 27])
assert sum_a.evaluate()[0].shape == torch.Size([27])
assert sum_a[0].evaluate().shape == torch.Size([27])  # gives error in here
```

** Stack trace/error message **
```
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-251-7cce10ce99d3> in <module>()
      8 assert sum_a.shape == torch.Size([8, 27])
      9 assert sum_a.evaluate()[0].shape == torch.Size([27])
---> 10 assert sum_a[0].evaluate().shape == torch.Size([27])
     11

9 frames
/usr/local/lib/python3.6/dist-packages/gpytorch/lazy/lazy_tensor.py in __getitem__(self, index)
   1703         # with the appropriate shape
   1704         if (squeeze_row or squeeze_col or row_col_are_absorbed):
-> 1705             res = delazify(res)
   1706             if squeeze_row:
   1707                 res = res.squeeze(-2)

/usr/local/lib/python3.6/dist-packages/gpytorch/lazy/lazy_tensor.py in delazify(obj)
   1753         return obj
   1754     elif isinstance(obj, LazyTensor):
-> 1755         return obj.evaluate()
   1756     else:
   1757         raise TypeError("object of class {} cannot be made into a Tensor".format(obj.__class__.__name__))

/usr/local/lib/python3.6/dist-packages/gpytorch/utils/memoize.py in g(self, *args, **kwargs)
     32         cache_name = name if name is not None else method
     33         if not is_in_cache(self, cache_name):
---> 34             add_to_cache(self, cache_name, method(self, *args, **kwargs))
     35         return get_from_cache(self, cache_name)
     36

/usr/local/lib/python3.6/dist-packages/gpytorch/lazy/lazy_tensor.py in evaluate(self)
    858             eye = torch.eye(num_rows, dtype=self.dtype, device=self.device)
    859             eye = eye.expand(*self.batch_shape, num_rows, num_rows)
--> 860             res = self.transpose(-1, -2).matmul(eye).transpose(-1, -2).contiguous()
    861         else:
    862             eye = torch.eye(num_cols, dtype=self.dtype, device=self.device)

/usr/local/lib/python3.6/dist-packages/gpytorch/lazy/lazy_tensor.py in matmul(self, other)
   1093
   1094         func = Matmul()
-> 1095         return func.apply(self.representation_tree(), other, *self.representation())
   1096
   1097     @property

/usr/local/lib/python3.6/dist-packages/gpytorch/functions/_matmul.py in forward(ctx, representation_tree, rhs, *matrix_args)
     18
     19         lazy_tsr = ctx.representation_tree(*matrix_args)
---> 20         res = lazy_tsr._matmul(rhs)
     21
     22         to_save = [orig_rhs] + list(matrix_args)

/usr/local/lib/python3.6/dist-packages/gpytorch/lazy/block_lazy_tensor.py in _matmul(self, rhs)
     64
     65         rhs = self._add_batch_dim(rhs)
---> 66         res = self.base_lazy_tensor._matmul(rhs)
     67         res = self._remove_batch_dim(res)
     68

/usr/local/lib/python3.6/dist-packages/gpytorch/lazy/interpolated_lazy_tensor.py in _matmul(self, rhs)
    157     def _matmul(self, rhs):
    158         # Get sparse tensor representations of left/right interp matrices
--> 159         left_interp_t = self._sparse_left_interp_t(self.left_interp_indices, self.left_interp_values)
    160         right_interp_t = self._sparse_right_interp_t(self.right_interp_indices, self.right_interp_values)
    161

/usr/local/lib/python3.6/dist-packages/gpytorch/lazy/interpolated_lazy_tensor.py in _sparse_left_interp_t(self, left_interp_indices_tensor, left_interp_values_tensor)
    309
    310         left_interp_t = sparse.make_sparse_from_indices_and_values(
--> 311             left_interp_indices_tensor, left_interp_values_tensor, self.base_lazy_tensor.size()[-1]
    312         )
    313         self._left_interp_indices_memo = left_interp_indices_tensor

/usr/local/lib/python3.6/dist-packages/gpytorch/utils/sparse.py in make_sparse_from_indices_and_values(interp_indices, interp_values, num_rows)
     59     else:
     60         cls = getattr(torch.sparse, type_name)
---> 61         res = cls(index_tensor, value_tensor, interp_size)
     62
     63     # Wrap things as a variable, if necessary

RuntimeError: size is inconsistent with indices: for dim 1, size is 8 but found index 26
```

## Expected Behavior

Expected to pass the tests.

## System information

**Please complete the following information:**
- GPyTorch Version 0.3.5
- PyTorch Version 1.2.0
- Ubuntu 18.04.3 LTS
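A quick way to see why that last assertion ought to hold, independent of gpytorch's lazy machinery: taking a row of the summed Kronecker blocks is the same as taking that row of every block and summing. A minimal plain-torch sketch of that identity (shapes copied from the snippet above; `torch.kron` needs a reasonably recent PyTorch, and the Kronecker factor ordering here is only illustrative, not necessarily gpytorch's convention):

```python
import torch

# Three independent 5 x 2 x 3 batches, mirroring the shapes in the report.
a = torch.rand(3, 5, 2, 3)

# Kronecker product of the three factors for each of the 5 batch entries
# gives 5 blocks of size 8 x 27.
blocks = torch.stack([
    torch.kron(torch.kron(a[0, i], a[1, i]), a[2, i]) for i in range(5)
])

summed = blocks.sum(dim=0)            # 8 x 27, what SumBatchLazyTensor represents
row_of_sum = summed[0]                # evaluate-then-index
sum_of_rows = blocks[:, 0, :].sum(0)  # index-then-sum over the block dimension

assert torch.allclose(row_of_sum, sum_of_rows)
```

This is exactly the algebraic fact a direct `evaluate()` on the summed tensor can lean on: evaluate the base blocks and sum over the block dimension, instead of going through the sparse-matmul path that fails above.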
[ { "content": "#!/usr/bin/env python3\n\nimport torch\nfrom .block_lazy_tensor import BlockLazyTensor\nfrom ..utils.broadcasting import _pad_with_singletons\nfrom ..utils.getitem import _noop_index\n\n\nclass SumBatchLazyTensor(BlockLazyTensor):\n \"\"\"\n Represents a lazy tensor that is actually the sum of several lazy tensors blocks.\n The :attr:`block_dim` attribute specifies which dimension of the base LazyTensor\n specifies the blocks.\n For example, (with `block_dim=-3` a `k x n x n` tensor represents `k` `n x n` blocks (a `n x n` matrix).\n A `b x k x n x n` tensor represents `k` `b x n x n` blocks (a `b x n x n` batch matrix).\n\n Args:\n :attr:`base_lazy_tensor` (LazyTensor):\n A `k x n x n` LazyTensor, or a `b x k x n x n` LazyTensor.\n :attr:`block_dim` (int):\n The dimension that specifies the blocks.\n \"\"\"\n def _add_batch_dim(self, other):\n shape = list(other.shape)\n expand_shape = list(other.shape)\n shape.insert(-2, 1)\n expand_shape.insert(-2, self.base_lazy_tensor.size(-3))\n other = other.reshape(*shape).expand(*expand_shape)\n return other\n\n def _get_indices(self, row_index, col_index, *batch_indices):\n # Create an extra index for the summed dimension\n sum_index = torch.arange(0, self.base_lazy_tensor.size(-3), device=self.device)\n sum_index = _pad_with_singletons(sum_index, row_index.dim(), 0)\n row_index = row_index.unsqueeze(-1)\n col_index = col_index.unsqueeze(-1)\n batch_indices = [index.unsqueeze(-1) for index in batch_indices]\n\n res = self.base_lazy_tensor._get_indices(row_index, col_index, *batch_indices, sum_index)\n return res.sum(-1)\n\n def _getitem(self, row_index, col_index, *batch_indices):\n res = self.base_lazy_tensor._getitem(row_index, col_index, *batch_indices, _noop_index)\n return self.__class__(res, **self._kwargs)\n\n def _remove_batch_dim(self, other):\n return other.sum(-3)\n\n def _size(self):\n shape = list(self.base_lazy_tensor.shape)\n del shape[-3]\n return torch.Size(shape)\n\n def diag(self):\n diag = self.base_lazy_tensor.diag().sum(-2)\n return diag\n", "path": "gpytorch/lazy/sum_batch_lazy_tensor.py" } ]
[ { "content": "#!/usr/bin/env python3\n\nimport torch\nfrom .block_lazy_tensor import BlockLazyTensor\nfrom ..utils.broadcasting import _pad_with_singletons\nfrom ..utils.getitem import _noop_index\n\n\nclass SumBatchLazyTensor(BlockLazyTensor):\n \"\"\"\n Represents a lazy tensor that is actually the sum of several lazy tensors blocks.\n The :attr:`block_dim` attribute specifies which dimension of the base LazyTensor\n specifies the blocks.\n For example, (with `block_dim=-3` a `k x n x n` tensor represents `k` `n x n` blocks (a `n x n` matrix).\n A `b x k x n x n` tensor represents `k` `b x n x n` blocks (a `b x n x n` batch matrix).\n\n Args:\n :attr:`base_lazy_tensor` (LazyTensor):\n A `k x n x n` LazyTensor, or a `b x k x n x n` LazyTensor.\n :attr:`block_dim` (int):\n The dimension that specifies the blocks.\n \"\"\"\n def _add_batch_dim(self, other):\n shape = list(other.shape)\n expand_shape = list(other.shape)\n shape.insert(-2, 1)\n expand_shape.insert(-2, self.base_lazy_tensor.size(-3))\n other = other.reshape(*shape).expand(*expand_shape)\n return other\n\n def _get_indices(self, row_index, col_index, *batch_indices):\n # Create an extra index for the summed dimension\n sum_index = torch.arange(0, self.base_lazy_tensor.size(-3), device=self.device)\n sum_index = _pad_with_singletons(sum_index, row_index.dim(), 0)\n row_index = row_index.unsqueeze(-1)\n col_index = col_index.unsqueeze(-1)\n batch_indices = [index.unsqueeze(-1) for index in batch_indices]\n\n res = self.base_lazy_tensor._get_indices(row_index, col_index, *batch_indices, sum_index)\n return res.sum(-1)\n\n def _getitem(self, row_index, col_index, *batch_indices):\n res = self.base_lazy_tensor._getitem(row_index, col_index, *batch_indices, _noop_index)\n return self.__class__(res, **self._kwargs)\n\n def _remove_batch_dim(self, other):\n return other.sum(-3)\n\n def _size(self):\n shape = list(self.base_lazy_tensor.shape)\n del shape[-3]\n return torch.Size(shape)\n\n def diag(self):\n diag = self.base_lazy_tensor.diag().sum(-2)\n return diag\n\n def evaluate(self):\n return self.base_lazy_tensor.evaluate().sum(dim=-3) # BlockLazyTensors always use dim3 for the block_dim\n", "path": "gpytorch/lazy/sum_batch_lazy_tensor.py" } ]
diff --git a/gpytorch/lazy/sum_batch_lazy_tensor.py b/gpytorch/lazy/sum_batch_lazy_tensor.py
index 7d9f738c2..171b38379 100644
--- a/gpytorch/lazy/sum_batch_lazy_tensor.py
+++ b/gpytorch/lazy/sum_batch_lazy_tensor.py
@@ -54,3 +54,6 @@ def _size(self):
     def diag(self):
         diag = self.base_lazy_tensor.diag().sum(-2)
         return diag
+
+    def evaluate(self):
+        return self.base_lazy_tensor.evaluate().sum(dim=-3)  # BlockLazyTensors always use dim3 for the block_dim
pymedusa__Medusa-3547
codec can't encode characters in position 29-36
### Before submitting your issue:

Enable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)

**Branch/Commit:** feature/add-indexerids-to-db/4bdbd81
**OS:** windows
**What you did:** Started up Medusa while having the series `Tokyo Goul` added, with scene exceptions added from xem.
**What happened:** The error below showed.
**What you expected:** No error.
**Logs:**
```
2017-12-27 21:29:34 ERROR MAIN :: [4bdbd81] BraceMessage string formatting failed. Using representation instead.
  File "D:\JetBrains\PyCharm 2017.2.4\helpers\pydev\pydevd.py", line 1599, in <module>
    globals = debugger.run(setup['file'], None, None, is_module)
  File "D:\JetBrains\PyCharm 2017.2.4\helpers\pydev\pydevd.py", line 1026, in run
    pydev_imports.execfile(file, globals, locals)  # execute the script
  File "D:/Development/Medusa5/start.py", line 7, in <module>
    main()
  File "D:/Development/Medusa5\medusa\__main__.py", line 2109, in main
    application.start(sys.argv[1:])
  File "D:/Development/Medusa5\medusa\__main__.py", line 354, in start
    name_cache.build_name_cache()
  File "D:/Development/Medusa5\medusa\name_cache.py", line 128, in build_name_cache
    _cache_name(show)
  File "D:/Development/Medusa5\medusa\name_cache.py", line 116, in _cache_name
    'names': ', '.join(names.keys())
  File "D:/Development/Medusa5\medusa\logger\adapters\style.py", line 89, in log
    self.logger.log(level, brace_msg, **kwargs)
  File "D:\Python27\lib\logging\__init__.py", line 1489, in log
    self.logger.log(level, msg, *args, **kwargs)
  File "D:\Python27\lib\logging\__init__.py", line 1231, in log
    self._log(level, msg, args, **kwargs)
  File "D:\Python27\lib\logging\__init__.py", line 1286, in _log
    self.handle(record)
  File "D:\Python27\lib\logging\__init__.py", line 1296, in handle
    self.callHandlers(record)
  File "D:\Python27\lib\logging\__init__.py", line 1336, in callHandlers
    hdlr.handle(record)
  File "D:\Python27\lib\logging\__init__.py", line 759, in handle
    self.emit(record)
  File "D:\Python27\lib\logging\handlers.py", line 78, in emit
    logging.FileHandler.emit(self, record)
  File "D:\Python27\lib\logging\__init__.py", line 957, in emit
    StreamHandler.emit(self, record)
  File "D:\Python27\lib\logging\__init__.py", line 861, in emit
    msg = self.format(record)
  File "D:\Python27\lib\logging\__init__.py", line 734, in format
    return fmt.format(record)
  File "D:/Development/Medusa5\medusa\logger\__init__.py", line 546, in format
    msg = super(CensoredFormatter, self).format(record)
  File "D:\Python27\lib\logging\__init__.py", line 465, in format
    record.message = record.getMessage()
  File "D:\Python27\lib\logging\__init__.py", line 325, in getMessage
    msg = str(self.msg)
  File "D:/Development/Medusa5\medusa\init\logconfig.py", line 80, in __str__
    result = text_type(self.fmt)
  File "D:/Development/Medusa5\medusa\logger\adapters\style.py", line 49, in __str__
    ''.join(traceback.format_stack()),
Traceback (most recent call last):
  File "D:/Development/Medusa5\medusa\logger\adapters\style.py", line 39, in __str__
    return msg.format(*args, **kwargs)
  File "D:\Python27\lib\encodings\cp1252.py", line 12, in encode
    return codecs.charmap_encode(input,errors,encoding_table)
UnicodeEncodeError: 'charmap' codec can't encode characters in position 29-36: character maps to <undefined>
```

I got it in my branch, but I doubt it was because of any of the changes I did.
[ { "content": "# coding=utf-8\n\n\"\"\"Style Adapters for Python logging.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport collections\nimport functools\nimport logging\nimport traceback\n\nfrom six import text_type\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\nclass BraceMessage(object):\n \"\"\"Lazily convert a Brace-formatted message.\"\"\"\n\n def __init__(self, msg, *args, **kwargs):\n \"\"\"Initialize a lazy-formatted message.\"\"\"\n self.msg = msg\n self.args = args\n self.kwargs = kwargs\n\n def __str__(self):\n \"\"\"Convert to string.\"\"\"\n args = self.args\n kwargs = self.kwargs\n if args and len(args) == 1:\n if args[0] and isinstance(args[0], collections.Mapping):\n args = []\n kwargs = self.args[0]\n\n msg = str(self.msg)\n\n try:\n return msg.format(*args, **kwargs)\n except IndexError:\n try:\n return msg.format(kwargs)\n except IndexError:\n return msg\n except Exception:\n log.error(\n 'BraceMessage string formatting failed. '\n 'Using representation instead.\\n{0}'.format(\n ''.join(traceback.format_stack()),\n )\n )\n return repr(self)\n\n def __repr__(self):\n \"\"\"Convert to class representation.\"\"\"\n sep = ', '\n kw_repr = '{key}={value!r}'\n name = self.__class__.__name__\n args = sep.join(map(text_type, self.args))\n kwargs = sep.join(kw_repr.format(key=k, value=v)\n for k, v in self.kwargs.items())\n return '{cls}({args})'.format(\n cls=name,\n args=sep.join([repr(self.msg), args, kwargs])\n )\n\n def format(self, *args, **kwargs):\n \"\"\"Format a BraceMessage string.\"\"\"\n return str(self).format(*args, **kwargs)\n\n\nclass BraceAdapter(logging.LoggerAdapter):\n \"\"\"Adapt logger to use Brace-formatted messages.\"\"\"\n\n def __init__(self, logger, extra=None):\n \"\"\"Initialize the Brace adapter with a logger.\"\"\"\n super(BraceAdapter, self).__init__(logger, extra)\n self.debug = functools.partial(self.log, logging.DEBUG)\n self.info = functools.partial(self.log, logging.INFO)\n self.warning = functools.partial(self.log, logging.WARNING)\n self.error = functools.partial(self.log, logging.ERROR)\n self.critical = functools.partial(self.log, logging.CRITICAL)\n\n def log(self, level, msg, *args, **kwargs):\n \"\"\"Log a message at the specified level using Brace-formatting.\"\"\"\n if self.isEnabledFor(level):\n msg, kwargs = self.process(msg, kwargs)\n brace_msg = BraceMessage(msg, *args, **kwargs)\n self.logger.log(level, brace_msg, **kwargs)\n\n def exception(self, msg, *args, **kwargs):\n \"\"\"Add exception information before delegating to self.log.\"\"\"\n kwargs['exc_info'] = 1\n self.log(logging.ERROR, msg, *args, **kwargs)\n", "path": "medusa/logger/adapters/style.py" } ]
[ { "content": "# coding=utf-8\n\n\"\"\"Style Adapters for Python logging.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport collections\nimport functools\nimport logging\nimport traceback\n\nfrom six import text_type\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\nclass BraceMessage(object):\n \"\"\"Lazily convert a Brace-formatted message.\"\"\"\n\n def __init__(self, msg, *args, **kwargs):\n \"\"\"Initialize a lazy-formatted message.\"\"\"\n self.msg = msg\n self.args = args\n self.kwargs = kwargs\n\n def __str__(self):\n \"\"\"Convert to string.\"\"\"\n args = self.args\n kwargs = self.kwargs\n if args and len(args) == 1:\n if args[0] and isinstance(args[0], collections.Mapping):\n args = []\n kwargs = self.args[0]\n\n msg = text_type(self.msg)\n\n try:\n return msg.format(*args, **kwargs)\n except IndexError:\n try:\n return msg.format(kwargs)\n except IndexError:\n return msg\n except Exception:\n log.error(\n 'BraceMessage string formatting failed. '\n 'Using representation instead.\\n{0}'.format(\n ''.join(traceback.format_stack()),\n )\n )\n return repr(self)\n\n def __repr__(self):\n \"\"\"Convert to class representation.\"\"\"\n sep = ', '\n kw_repr = '{key}={value!r}'\n name = self.__class__.__name__\n args = sep.join(map(text_type, self.args))\n kwargs = sep.join(kw_repr.format(key=k, value=v)\n for k, v in self.kwargs.items())\n return '{cls}({args})'.format(\n cls=name,\n args=sep.join([repr(self.msg), args, kwargs])\n )\n\n def format(self, *args, **kwargs):\n \"\"\"Format a BraceMessage string.\"\"\"\n return str(self).format(*args, **kwargs)\n\n\nclass BraceAdapter(logging.LoggerAdapter):\n \"\"\"Adapt logger to use Brace-formatted messages.\"\"\"\n\n def __init__(self, logger, extra=None):\n \"\"\"Initialize the Brace adapter with a logger.\"\"\"\n super(BraceAdapter, self).__init__(logger, extra)\n self.debug = functools.partial(self.log, logging.DEBUG)\n self.info = functools.partial(self.log, logging.INFO)\n self.warning = functools.partial(self.log, logging.WARNING)\n self.error = functools.partial(self.log, logging.ERROR)\n self.critical = functools.partial(self.log, logging.CRITICAL)\n\n def log(self, level, msg, *args, **kwargs):\n \"\"\"Log a message at the specified level using Brace-formatting.\"\"\"\n if self.isEnabledFor(level):\n msg, kwargs = self.process(msg, kwargs)\n brace_msg = BraceMessage(msg, *args, **kwargs)\n self.logger.log(level, brace_msg, **kwargs)\n\n def exception(self, msg, *args, **kwargs):\n \"\"\"Add exception information before delegating to self.log.\"\"\"\n kwargs['exc_info'] = 1\n self.log(logging.ERROR, msg, *args, **kwargs)\n", "path": "medusa/logger/adapters/style.py" } ]
diff --git a/medusa/logger/adapters/style.py b/medusa/logger/adapters/style.py
index 483261e45b..cf280f6a5e 100644
--- a/medusa/logger/adapters/style.py
+++ b/medusa/logger/adapters/style.py
@@ -33,7 +33,7 @@ def __str__(self):
                 args = []
                 kwargs = self.args[0]

-        msg = str(self.msg)
+        msg = text_type(self.msg)

         try:
             return msg.format(*args, **kwargs)
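A minimal sketch of why that one-line swap matters under Python 2 (the interpreter in the traceback above). The message text below is only an illustrative non-ASCII value; the point is that `str()` forces a unicode message through the default codec, while `text_type()` leaves it untouched so the lazy brace-formatting can proceed:

```python
# -*- coding: utf-8 -*-
# Illustrative sketch only: why str() breaks BraceMessage.__str__ on Python 2
# while six.text_type does not. The message below is a made-up example.
from __future__ import unicode_literals

from six import text_type

msg = 'Building internal name cache for 東京喰種 with names: {names}'

try:
    str(msg)  # Python 2: encodes with the default codec -> UnicodeEncodeError
except UnicodeEncodeError:
    pass  # this is the failure mode seen in the log above

# text_type() is a no-op for text on both Python 2 and 3, so the message can
# be brace-formatted without ever being encoded.
formatted = text_type(msg).format(names='Tokyo Ghoul')
assert '{names}' not in formatted
```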
ethereum__consensus-specs-1522
Test coverage: attester slashings with duplicated attesting indices
While updating to v0.9.1 I noticed that the spec allows validator indices to be duplicated in `IndexedAttestation`s. In Lighthouse, our function to check the sorting of the indices was also (incorrectly) checking for duplicates, which wasn't picked up by any of our tests or the spec tests.

I think it would be good to have a test vector with an attester slashing containing duplicated indices, if that is indeed the intended behaviour. For the signature, it's sufficient to add the signatures of the duplicated validators to the aggregate multiple times. Concretely, this attester slashing passes validation (pardon the Rust):

```rust
let sig1 = Signature::new(&data1.tree_hash_root(), domain, &keypairs[0].sk);
let sig2 = Signature::new(&data2.tree_hash_root(), domain, &keypairs[0].sk);

let mut agg1 = AggregateSignature::new();
agg1.add(&sig1);
agg1.add(&sig1);

let mut agg2 = AggregateSignature::new();
agg2.add(&sig2);
agg2.add(&sig2);

let attestation_1 = IndexedAttestation {
    attesting_indices: vec![0, 0].into(),
    data: data1,
    signature: agg1,
};

let attestation_2 = IndexedAttestation {
    attesting_indices: vec![0, 0].into(),
    data: data2,
    signature: agg2,
};

let attester_slashing = AttesterSlashing {
    attestation_1,
    attestation_2,
};
```

Proposer index edge cases to test
So thanks to the Prysm testnet, an under-tested proposer property was found: the proposer index and shuffling can both be correct, but if the active validators don't match the regular indices there can still be a problem. And it is only off after the first inactive validator index in the system, so some tests could look like they test shuffling well (directly, or indirectly via rewards or otherwise) but just missed it.

TLDR: todo, add test cases for:

- `proposer index > len(active validators)` (already have proposals, but not this edge case)
- `proposer index > any slashed index` (already have slashings, but not this edge case)

Larger validator numbers move the proposer index to be more random and more likely to be bigger than those other numbers, while the current smaller validator sets in tests are generally only 512 validators (and increasing that would increase the total test size by approximately the same factor).
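Returning to the duplicated-indices request above: the crux is that the v0.9.x index validation only asserts that `attesting_indices` are sorted, and `sorted()` happily accepts equal neighbours. A tiny pure-Python mirror of just that check (the constant value and helper name are illustrative rather than the real eth2spec API, and BLS signature verification is omitted entirely):

```python
# Simplified mirror of the index checks in is_valid_indexed_attestation (v0.9.x).
MAX_VALIDATORS_PER_COMMITTEE = 2048  # mainnet-style value, for illustration


def indices_are_acceptable(attesting_indices):
    if len(attesting_indices) > MAX_VALIDATORS_PER_COMMITTEE:
        return False
    # Sortedness only -- equal neighbours pass, so duplicates are allowed.
    return list(attesting_indices) == sorted(attesting_indices)


assert indices_are_acceptable([0, 0])      # duplicated index accepted
assert indices_are_acceptable([0, 1, 2])   # normal case
assert not indices_are_acceptable([1, 0])  # out-of-order indices rejected
```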
[ { "content": "import re\nfrom function_puller import (\n get_spec,\n SpecObject,\n)\nfrom argparse import ArgumentParser\nfrom typing import (\n Dict,\n Optional,\n)\n\n\nPHASE0_IMPORTS = '''from typing import (\n Any, Dict, Set, Sequence, Tuple, Optional\n)\n\nfrom dataclasses import (\n dataclass,\n field,\n)\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root\nfrom eth2spec.utils.ssz.ssz_typing import (\n boolean, Container, List, Vector, uint64,\n Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,\n)\nfrom eth2spec.utils.bls import (\n bls_aggregate_signatures,\n bls_aggregate_pubkeys,\n bls_verify,\n bls_sign,\n)\n\nfrom eth2spec.utils.hash_function import hash\n'''\nPHASE1_IMPORTS = '''from typing import (\n Any, Dict, Set, Sequence, MutableSequence, NewType, Tuple, Union,\n)\nfrom math import (\n log2,\n)\n\nfrom dataclasses import (\n dataclass,\n field,\n)\n\nfrom eth2spec.utils.ssz.ssz_impl import (\n hash_tree_root,\n is_zero,\n)\nfrom eth2spec.utils.ssz.ssz_typing import (\n BasicValue, Elements, BaseBytes, BaseList, SSZType,\n Container, List, Vector, ByteList, ByteVector, Bitlist, Bitvector, Bits,\n Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96,\n uint64, bit, boolean, byte,\n)\nfrom eth2spec.utils.bls import (\n bls_aggregate_pubkeys,\n bls_verify,\n bls_verify_multiple,\n bls_signature_to_G2,\n)\n\nfrom eth2spec.utils.hash_function import hash\n\n\nSSZVariableName = str\nGeneralizedIndex = NewType('GeneralizedIndex', int)\n'''\nSUNDRY_CONSTANTS_FUNCTIONS = '''\ndef ceillog2(x: uint64) -> int:\n return (x - 1).bit_length()\n'''\nSUNDRY_FUNCTIONS = '''\n# Monkey patch hash cache\n_hash = hash\nhash_cache: Dict[bytes, Bytes32] = {}\n\n\ndef get_eth1_data(distance: uint64) -> Bytes32:\n return hash(distance)\n\n\ndef hash(x: bytes) -> Bytes32:\n if x not in hash_cache:\n hash_cache[x] = Bytes32(_hash(x))\n return hash_cache[x]\n\n\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache: Dict[Tuple[Bytes32, Bytes32, int, int], Sequence[ValidatorIndex]] = {}\n\n\ndef compute_committee(indices: Sequence[ValidatorIndex], # type: ignore\n seed: Bytes32,\n index: int,\n count: int) -> Sequence[ValidatorIndex]:\n param_hash = (hash(b''.join(index.to_bytes(length=4, byteorder='little') for index in indices)), seed, index, count)\n\n if param_hash not in committee_cache:\n committee_cache[param_hash] = _compute_committee(indices, seed, index, count)\n return committee_cache[param_hash]\n\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]) -> None:\n global_vars = globals()\n for k, v in preset.items():\n if k.startswith('DOMAIN_'):\n global_vars[k] = DomainType(v) # domain types are defined as bytes in the configs\n else:\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = compute_epoch_at_slot(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n'''\n\n\ndef remove_for_phase1(functions: Dict[str, str]):\n for key, value in functions.items():\n lines = value.split(\"\\n\")\n lines = filter(lambda s: \"[to be removed in phase 1]\" not in s, lines)\n functions[key] = \"\\n\".join(lines)\n\n\ndef strip_comments(raw: str) -> str:\n comment_line_regex = re.compile(r'^\\s+# ')\n lines = raw.split('\\n')\n out = []\n for line in lines:\n if not comment_line_regex.match(line):\n if ' #' in line:\n line = line[:line.index(' #')]\n out.append(line)\n return 
'\\n'.join(out)\n\n\ndef objects_to_spec(functions: Dict[str, str],\n custom_types: Dict[str, str],\n constants: Dict[str, str],\n ssz_objects: Dict[str, str],\n inserts: Dict[str, str],\n imports: Dict[str, str],\n ) -> str:\n \"\"\"\n Given all the objects that constitute a spec, combine them into a single pyfile.\n \"\"\"\n new_type_definitions = (\n '\\n\\n'.join(\n [\n f\"class {key}({value}):\\n pass\\n\"\n for key, value in custom_types.items()\n ]\n )\n )\n for k in list(functions):\n if \"ceillog2\" in k:\n del functions[k]\n functions_spec = '\\n\\n'.join(functions.values())\n for k in list(constants.keys()):\n if k.startswith('DOMAIN_'):\n constants[k] = f\"DomainType(({constants[k]}).to_bytes(length=4, byteorder='little'))\"\n if k == \"BLS12_381_Q\":\n constants[k] += \" # noqa: E501\"\n constants_spec = '\\n'.join(map(lambda x: '%s = %s' % (x, constants[x]), constants))\n ssz_objects_instantiation_spec = '\\n\\n'.join(ssz_objects.values())\n ssz_objects_reinitialization_spec = (\n 'def init_SSZ_types() -> None:\\n global_vars = globals()\\n\\n '\n + '\\n\\n '.join([strip_comments(re.sub(r'(?!\\n\\n)\\n', r'\\n ', value[:-1]))\n for value in ssz_objects.values()])\n + '\\n\\n'\n + '\\n'.join(map(lambda x: ' global_vars[\\'%s\\'] = %s' % (x, x), ssz_objects.keys()))\n )\n spec = (\n imports\n + '\\n\\n' + new_type_definitions\n + '\\n' + SUNDRY_CONSTANTS_FUNCTIONS\n + '\\n\\n' + constants_spec\n + '\\n\\n\\n' + ssz_objects_instantiation_spec\n + '\\n\\n' + functions_spec\n + '\\n' + SUNDRY_FUNCTIONS\n + '\\n\\n' + ssz_objects_reinitialization_spec\n + '\\n'\n )\n # Handle @inserts\n for key, value in inserts.items():\n spec = re.sub('[ ]*# %s\\\\n' % key, value, spec)\n return spec\n\n\ndef combine_functions(old_functions: Dict[str, str], new_functions: Dict[str, str]) -> Dict[str, str]:\n for key, value in new_functions.items():\n old_functions[key] = value\n return old_functions\n\n\ndef combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, str]) -> Dict[str, str]:\n for key, value in new_constants.items():\n old_constants[key] = value\n return old_constants\n\n\nignored_dependencies = [\n 'bit', 'boolean', 'Vector', 'List', 'Container', 'Hash', 'BLSPubkey', 'BLSSignature', 'ByteList', 'ByteVector'\n 'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',\n 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\n 'bytes', 'byte', 'ByteVector' # to be removed after updating spec doc\n]\n\n\ndef dependency_order_ssz_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:\n \"\"\"\n Determines which SSZ Object is dependent on which other and orders them appropriately\n \"\"\"\n items = list(objects.items())\n for key, value in items:\n dependencies = []\n for line in value.split('\\n'):\n if not re.match(r'\\s+\\w+: .+', line):\n continue # skip whitespace etc.\n line = line[line.index(':') + 1:] # strip of field name\n if '#' in line:\n line = line[:line.index('#')] # strip of comment\n dependencies.extend(re.findall(r'(\\w+)', line)) # catch all legible words, potential dependencies\n dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies) # filter out constants\n dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)\n dependencies = filter(lambda x: x not in custom_types, dependencies)\n for dep in dependencies:\n key_list = list(objects.keys())\n for item in [dep, key] + key_list[key_list.index(dep)+1:]:\n objects[item] = objects.pop(item)\n\n\ndef 
combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:\n \"\"\"\n Takes in old spec and new spec ssz objects, combines them,\n and returns the newer versions of the objects in dependency order.\n \"\"\"\n for key, value in new_objects.items():\n if key in old_objects:\n # remove trailing newline\n old_objects[key] = old_objects[key]\n # remove leading variable name\n value = re.sub(r'^class [\\w]*\\(Container\\):\\n', '', value)\n old_objects[key] = old_objects.get(key, '') + value\n dependency_order_ssz_objects(old_objects, custom_types)\n return old_objects\n\n\n# inserts are handeled the same way as functions\ncombine_inserts = combine_functions\n\n\ndef combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:\n \"\"\"\n Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.\n \"\"\"\n functions0, custom_types0, constants0, ssz_objects0, inserts0 = spec0\n functions1, custom_types1, constants1, ssz_objects1, inserts1 = spec1\n functions = combine_functions(functions0, functions1)\n custom_types = combine_constants(custom_types0, custom_types1)\n constants = combine_constants(constants0, constants1)\n ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)\n inserts = combine_inserts(inserts0, inserts1)\n return functions, custom_types, constants, ssz_objects, inserts\n\n\ndef build_phase0_spec(phase0_sourcefile: str, fork_choice_sourcefile: str,\n v_guide_sourcefile: str, outfile: str=None) -> Optional[str]:\n phase0_spec = get_spec(phase0_sourcefile)\n fork_choice_spec = get_spec(fork_choice_sourcefile)\n v_guide = get_spec(v_guide_sourcefile)\n spec_objects = phase0_spec\n for value in [fork_choice_spec, v_guide]:\n spec_objects = combine_spec_objects(spec_objects, value)\n spec = objects_to_spec(*spec_objects, PHASE0_IMPORTS)\n if outfile is not None:\n with open(outfile, 'w') as out:\n out.write(spec)\n return spec\n\n\ndef build_phase1_spec(phase0_beacon_sourcefile: str,\n phase0_fork_choice_sourcefile: str,\n merkle_proofs_sourcefile: str,\n phase1_custody_sourcefile: str,\n phase1_shard_sourcefile: str,\n phase1_beacon_misc_sourcefile: str,\n outfile: str=None) -> Optional[str]:\n all_sourcefiles = (\n phase0_beacon_sourcefile,\n phase0_fork_choice_sourcefile,\n merkle_proofs_sourcefile,\n phase1_custody_sourcefile,\n phase1_shard_sourcefile,\n phase1_beacon_misc_sourcefile,\n )\n all_spescs = [get_spec(spec) for spec in all_sourcefiles]\n for spec in all_spescs:\n remove_for_phase1(spec[0])\n spec_objects = all_spescs[0]\n for value in all_spescs[1:]:\n spec_objects = combine_spec_objects(spec_objects, value)\n spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS)\n if outfile is not None:\n with open(outfile, 'w') as out:\n out.write(spec)\n return spec\n\n\nif __name__ == '__main__':\n description = '''\nBuild the specs from the md docs.\nIf building phase 0:\n 1st argument is input /core/0_beacon-chain.md\n 2nd argument is input /core/0_fork-choice.md\n 3rd argument is input /core/0_beacon-chain-validator.md\n 4th argument is output spec.py\n\nIf building phase 1:\n 1st argument is input /core/0_beacon-chain.md\n 2nd argument is input /core/0_fork-choice.md\n 3rd argument is input /light_client/merkle_proofs.md\n 4th argument is input /core/1_custody-game.md\n 5th argument is input /core/1_shard-data-chains.md\n 6th argument is input /core/1_beacon-chain-misc.md\n 7th argument is output spec.py\n'''\n parser = 
ArgumentParser(description=description)\n parser.add_argument(\"-p\", \"--phase\", dest=\"phase\", type=int, default=0, help=\"Build for phase #\")\n parser.add_argument(dest=\"files\", help=\"Input and output files\", nargs=\"+\")\n\n args = parser.parse_args()\n if args.phase == 0:\n if len(args.files) == 4:\n build_phase0_spec(*args.files)\n else:\n print(\" Phase 0 requires spec, forkchoice, and v-guide inputs as well as an output file.\")\n elif args.phase == 1:\n if len(args.files) == 7:\n build_phase1_spec(*args.files)\n else:\n print(\n \" Phase 1 requires input files as well as an output file:\\n\"\n \"\\t core/phase_0: (0_beacon-chain.md, 0_fork-choice.md)\\n\"\n \"\\t light_client: (merkle_proofs.md)\\n\"\n \"\\t core/phase_1: (1_custody-game.md, 1_shard-data-chains.md, 1_beacon-chain-misc.md)\\n\"\n \"\\t and output.py\"\n )\n else:\n print(\"Invalid phase: {0}\".format(args.phase))\n", "path": "scripts/build_spec.py" } ]
[ { "content": "import re\nfrom function_puller import (\n get_spec,\n SpecObject,\n)\nfrom argparse import ArgumentParser\nfrom typing import (\n Dict,\n Optional,\n)\n\n\nPHASE0_IMPORTS = '''from typing import (\n Any, Dict, Set, Sequence, Tuple, Optional\n)\n\nfrom dataclasses import (\n dataclass,\n field,\n)\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root\nfrom eth2spec.utils.ssz.ssz_typing import (\n boolean, Container, List, Vector, uint64,\n Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,\n)\nfrom eth2spec.utils.bls import (\n bls_aggregate_signatures,\n bls_aggregate_pubkeys,\n bls_verify,\n bls_sign,\n)\n\nfrom eth2spec.utils.hash_function import hash\n'''\nPHASE1_IMPORTS = '''from typing import (\n Any, Dict, Set, Sequence, MutableSequence, NewType, Tuple, Union,\n)\nfrom math import (\n log2,\n)\n\nfrom dataclasses import (\n dataclass,\n field,\n)\n\nfrom eth2spec.utils.ssz.ssz_impl import (\n hash_tree_root,\n is_zero,\n)\nfrom eth2spec.utils.ssz.ssz_typing import (\n BasicValue, Elements, BaseBytes, BaseList, SSZType,\n Container, List, Vector, Bytes, BytesN, Bitlist, Bitvector, Bits,\n Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96,\n uint64, bit, boolean, byte,\n)\nfrom eth2spec.utils.bls import (\n bls_aggregate_pubkeys,\n bls_verify,\n bls_verify_multiple,\n bls_signature_to_G2,\n)\n\nfrom eth2spec.utils.hash_function import hash\n\n\nSSZVariableName = str\nGeneralizedIndex = NewType('GeneralizedIndex', int)\n'''\nSUNDRY_CONSTANTS_FUNCTIONS = '''\ndef ceillog2(x: uint64) -> int:\n return (x - 1).bit_length()\n'''\nSUNDRY_FUNCTIONS = '''\n# Monkey patch hash cache\n_hash = hash\nhash_cache: Dict[bytes, Bytes32] = {}\n\n\ndef get_eth1_data(distance: uint64) -> Bytes32:\n return hash(distance)\n\n\ndef hash(x: bytes) -> Bytes32: # type: ignore\n if x not in hash_cache:\n hash_cache[x] = Bytes32(_hash(x))\n return hash_cache[x]\n\n\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache: Dict[Tuple[Bytes32, Bytes32, int, int], Sequence[ValidatorIndex]] = {}\n\n\ndef compute_committee(indices: Sequence[ValidatorIndex], # type: ignore\n seed: Bytes32,\n index: int,\n count: int) -> Sequence[ValidatorIndex]:\n param_hash = (hash(b''.join(index.to_bytes(length=4, byteorder='little') for index in indices)), seed, index, count)\n\n if param_hash not in committee_cache:\n committee_cache[param_hash] = _compute_committee(indices, seed, index, count)\n return committee_cache[param_hash]\n\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]) -> None:\n global_vars = globals()\n for k, v in preset.items():\n if k.startswith('DOMAIN_'):\n global_vars[k] = DomainType(v) # domain types are defined as bytes in the configs\n else:\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = compute_epoch_at_slot(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n'''\n\n\ndef remove_for_phase1(functions: Dict[str, str]):\n for key, value in functions.items():\n lines = value.split(\"\\n\")\n lines = filter(lambda s: \"[to be removed in phase 1]\" not in s, lines)\n functions[key] = \"\\n\".join(lines)\n\n\ndef strip_comments(raw: str) -> str:\n comment_line_regex = re.compile(r'^\\s+# ')\n lines = raw.split('\\n')\n out = []\n for line in lines:\n if not comment_line_regex.match(line):\n if ' #' in line:\n line = line[:line.index(' #')]\n out.append(line)\n return 
'\\n'.join(out)\n\n\ndef objects_to_spec(functions: Dict[str, str],\n custom_types: Dict[str, str],\n constants: Dict[str, str],\n ssz_objects: Dict[str, str],\n inserts: Dict[str, str],\n imports: Dict[str, str],\n ) -> str:\n \"\"\"\n Given all the objects that constitute a spec, combine them into a single pyfile.\n \"\"\"\n new_type_definitions = (\n '\\n\\n'.join(\n [\n f\"class {key}({value}):\\n pass\\n\"\n for key, value in custom_types.items()\n ]\n )\n )\n for k in list(functions):\n if \"ceillog2\" in k:\n del functions[k]\n functions_spec = '\\n\\n'.join(functions.values())\n for k in list(constants.keys()):\n if k.startswith('DOMAIN_'):\n constants[k] = f\"DomainType(({constants[k]}).to_bytes(length=4, byteorder='little'))\"\n if k == \"BLS12_381_Q\":\n constants[k] += \" # noqa: E501\"\n constants_spec = '\\n'.join(map(lambda x: '%s = %s' % (x, constants[x]), constants))\n ssz_objects_instantiation_spec = '\\n\\n'.join(ssz_objects.values())\n ssz_objects_reinitialization_spec = (\n 'def init_SSZ_types() -> None:\\n global_vars = globals()\\n\\n '\n + '\\n\\n '.join([strip_comments(re.sub(r'(?!\\n\\n)\\n', r'\\n ', value[:-1]))\n for value in ssz_objects.values()])\n + '\\n\\n'\n + '\\n'.join(map(lambda x: ' global_vars[\\'%s\\'] = %s' % (x, x), ssz_objects.keys()))\n )\n spec = (\n imports\n + '\\n\\n' + new_type_definitions\n + '\\n' + SUNDRY_CONSTANTS_FUNCTIONS\n + '\\n\\n' + constants_spec\n + '\\n\\n\\n' + ssz_objects_instantiation_spec\n + '\\n\\n' + functions_spec\n + '\\n' + SUNDRY_FUNCTIONS\n + '\\n\\n' + ssz_objects_reinitialization_spec\n + '\\n'\n )\n # Handle @inserts\n for key, value in inserts.items():\n spec = re.sub('[ ]*# %s\\\\n' % key, value, spec)\n return spec\n\n\ndef combine_functions(old_functions: Dict[str, str], new_functions: Dict[str, str]) -> Dict[str, str]:\n for key, value in new_functions.items():\n old_functions[key] = value\n return old_functions\n\n\ndef combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, str]) -> Dict[str, str]:\n for key, value in new_constants.items():\n old_constants[key] = value\n return old_constants\n\n\nignored_dependencies = [\n 'bit', 'boolean', 'Vector', 'List', 'Container', 'Root', 'BLSPubkey', 'BLSSignature', 'Bytes', 'BytesN'\n 'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',\n 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\n 'bytes', 'byte', 'BytesN' # to be removed after updating spec doc\n]\n\n\ndef dependency_order_ssz_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:\n \"\"\"\n Determines which SSZ Object is dependent on which other and orders them appropriately\n \"\"\"\n items = list(objects.items())\n for key, value in items:\n dependencies = []\n for line in value.split('\\n'):\n if not re.match(r'\\s+\\w+: .+', line):\n continue # skip whitespace etc.\n line = line[line.index(':') + 1:] # strip of field name\n if '#' in line:\n line = line[:line.index('#')] # strip of comment\n dependencies.extend(re.findall(r'(\\w+)', line)) # catch all legible words, potential dependencies\n dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies) # filter out constants\n dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)\n dependencies = filter(lambda x: x not in custom_types, dependencies)\n for dep in dependencies:\n key_list = list(objects.keys())\n for item in [dep, key] + key_list[key_list.index(dep)+1:]:\n objects[item] = objects.pop(item)\n\n\ndef 
combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:\n \"\"\"\n Takes in old spec and new spec ssz objects, combines them,\n and returns the newer versions of the objects in dependency order.\n \"\"\"\n for key, value in new_objects.items():\n if key in old_objects:\n # remove trailing newline\n old_objects[key] = old_objects[key]\n # remove leading variable name\n value = re.sub(r'^class [\\w]*\\(Container\\):\\n', '', value)\n old_objects[key] = old_objects.get(key, '') + value\n dependency_order_ssz_objects(old_objects, custom_types)\n return old_objects\n\n\n# inserts are handeled the same way as functions\ncombine_inserts = combine_functions\n\n\ndef combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:\n \"\"\"\n Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.\n \"\"\"\n functions0, custom_types0, constants0, ssz_objects0, inserts0 = spec0\n functions1, custom_types1, constants1, ssz_objects1, inserts1 = spec1\n functions = combine_functions(functions0, functions1)\n custom_types = combine_constants(custom_types0, custom_types1)\n constants = combine_constants(constants0, constants1)\n ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)\n inserts = combine_inserts(inserts0, inserts1)\n return functions, custom_types, constants, ssz_objects, inserts\n\n\ndef build_phase0_spec(phase0_sourcefile: str, fork_choice_sourcefile: str,\n v_guide_sourcefile: str, outfile: str=None) -> Optional[str]:\n phase0_spec = get_spec(phase0_sourcefile)\n fork_choice_spec = get_spec(fork_choice_sourcefile)\n v_guide = get_spec(v_guide_sourcefile)\n spec_objects = phase0_spec\n for value in [fork_choice_spec, v_guide]:\n spec_objects = combine_spec_objects(spec_objects, value)\n spec = objects_to_spec(*spec_objects, PHASE0_IMPORTS)\n if outfile is not None:\n with open(outfile, 'w') as out:\n out.write(spec)\n return spec\n\n\ndef build_phase1_spec(phase0_beacon_sourcefile: str,\n phase0_fork_choice_sourcefile: str,\n merkle_proofs_sourcefile: str,\n phase1_custody_sourcefile: str,\n phase1_shard_sourcefile: str,\n phase1_beacon_misc_sourcefile: str,\n outfile: str=None) -> Optional[str]:\n all_sourcefiles = (\n phase0_beacon_sourcefile,\n phase0_fork_choice_sourcefile,\n merkle_proofs_sourcefile,\n phase1_custody_sourcefile,\n phase1_shard_sourcefile,\n phase1_beacon_misc_sourcefile,\n )\n all_spescs = [get_spec(spec) for spec in all_sourcefiles]\n for spec in all_spescs:\n remove_for_phase1(spec[0])\n spec_objects = all_spescs[0]\n for value in all_spescs[1:]:\n spec_objects = combine_spec_objects(spec_objects, value)\n spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS)\n if outfile is not None:\n with open(outfile, 'w') as out:\n out.write(spec)\n return spec\n\n\nif __name__ == '__main__':\n description = '''\nBuild the specs from the md docs.\nIf building phase 0:\n 1st argument is input /core/0_beacon-chain.md\n 2nd argument is input /core/0_fork-choice.md\n 3rd argument is input /core/0_beacon-chain-validator.md\n 4th argument is output spec.py\n\nIf building phase 1:\n 1st argument is input /core/0_beacon-chain.md\n 2nd argument is input /core/0_fork-choice.md\n 3rd argument is input /light_client/merkle_proofs.md\n 4th argument is input /core/1_custody-game.md\n 5th argument is input /core/1_shard-data-chains.md\n 6th argument is input /core/1_beacon-chain-misc.md\n 7th argument is output spec.py\n'''\n parser = 
ArgumentParser(description=description)\n parser.add_argument(\"-p\", \"--phase\", dest=\"phase\", type=int, default=0, help=\"Build for phase #\")\n parser.add_argument(dest=\"files\", help=\"Input and output files\", nargs=\"+\")\n\n args = parser.parse_args()\n if args.phase == 0:\n if len(args.files) == 4:\n build_phase0_spec(*args.files)\n else:\n print(\" Phase 0 requires spec, forkchoice, and v-guide inputs as well as an output file.\")\n elif args.phase == 1:\n if len(args.files) == 7:\n build_phase1_spec(*args.files)\n else:\n print(\n \" Phase 1 requires input files as well as an output file:\\n\"\n \"\\t core/phase_0: (0_beacon-chain.md, 0_fork-choice.md)\\n\"\n \"\\t light_client: (merkle_proofs.md)\\n\"\n \"\\t core/phase_1: (1_custody-game.md, 1_shard-data-chains.md, 1_beacon-chain-misc.md)\\n\"\n \"\\t and output.py\"\n )\n else:\n print(\"Invalid phase: {0}\".format(args.phase))\n", "path": "scripts/build_spec.py" } ]
diff --git a/.circleci/config.yml b/.circleci/config.yml index b612378e24..19ab1543aa 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -103,6 +103,15 @@ jobs: command: make citest - store_test_results: path: test_libs/pyspec/test-reports + table_of_contents: + docker: + - image: circleci/node:10.16.3 + working_directory: ~/specs-repo + steps: + - checkout + - run: + name: Check table of contents + command: sudo npm install -g doctoc && make check_toc lint: docker: - image: circleci/python:3.6 @@ -148,6 +157,7 @@ workflows: - test: requires: - install_pyspec_test + - table_of_contents - lint: requires: - test diff --git a/Makefile b/Makefile index bfbc280701..2cdb1021ff 100644 --- a/Makefile +++ b/Makefile @@ -17,18 +17,22 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER #$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}]) PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py -PY_SPEC_PHASE_0_DEPS = $(SPEC_DIR)/core/0_*.md +PY_SPEC_PHASE_0_DEPS = $(wildcard $(SPEC_DIR)/core/0_*.md) PY_SPEC_PHASE_1_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase1/spec.py -PY_SPEC_PHASE_1_DEPS = $(SPEC_DIR)/core/1_*.md +PY_SPEC_PHASE_1_DEPS = $(wildcard $(SPEC_DIR)/core/1_*.md) + +PY_SPEC_ALL_DEPS = $(PY_SPEC_PHASE_0_DEPS) $(PY_SPEC_PHASE_1_DEPS) PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) $(PY_SPEC_PHASE_1_TARGETS) +MARKDOWN_FILES = $(PY_SPEC_ALL_DEPS) $(wildcard $(SPEC_DIR)/*.md) $(wildcard $(SPEC_DIR)/light_client/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md) + COV_HTML_OUT=.htmlcov COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html .PHONY: clean partial_clean all test citest lint generate_tests pyspec phase0 phase1 install_test open_cov \ - install_deposit_contract_test test_deposit_contract compile_deposit_contract + install_deposit_contract_test test_deposit_contract compile_deposit_contract check_toc all: $(PY_SPEC_ALL_TARGETS) @@ -65,6 +69,14 @@ citest: $(PY_SPEC_ALL_TARGETS) open_cov: ((open "$(COV_INDEX_FILE)" || xdg-open "$(COV_INDEX_FILE)") &> /dev/null) & +check_toc: $(MARKDOWN_FILES:=.toc) + +%.toc: + cp $* $*.tmp && \ + doctoc $* && \ + diff -q $* $*.tmp && \ + rm $*.tmp + lint: $(PY_SPEC_ALL_TARGETS) cd $(PY_SPEC_DIR); . 
venv/bin/activate; \ flake8 --ignore=E252,W504,W503 --max-line-length=120 ./eth2spec \ diff --git a/scripts/build_spec.py b/scripts/build_spec.py index c38d0bd65f..cca5a1bf9f 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -82,7 +82,7 @@ def get_eth1_data(distance: uint64) -> Bytes32: return hash(distance) -def hash(x: bytes) -> Bytes32: +def hash(x: bytes) -> Bytes32: # type: ignore if x not in hash_cache: hash_cache[x] = Bytes32(_hash(x)) return hash_cache[x] diff --git a/specs/bls_signature.md b/specs/bls_signature.md index 652279cd7f..284485afbf 100644 --- a/specs/bls_signature.md +++ b/specs/bls_signature.md @@ -6,23 +6,25 @@ ## Table of contents <!-- TOC --> - -- [BLS signature verification](#bls-signature-verification) - - [Table of contents](#table-of-contents) - - [Curve parameters](#curve-parameters) - - [Point representations](#point-representations) - - [G1 points](#g1-points) - - [G2 points](#g2-points) - - [Helpers](#helpers) - - [`hash_to_G2`](#hash_to_g2) - - [`modular_squareroot`](#modular_squareroot) - - [Aggregation operations](#aggregation-operations) - - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys) - - [`bls_aggregate_signatures`](#bls_aggregate_signatures) - - [Signature verification](#signature-verification) - - [`bls_verify`](#bls_verify) - - [`bls_verify_multiple`](#bls_verify_multiple) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Curve parameters](#curve-parameters) +- [Point representations](#point-representations) + - [G1 points](#g1-points) + - [G2 points](#g2-points) +- [Helpers](#helpers) + - [`hash_to_G2`](#hash_to_g2) + - [`modular_squareroot`](#modular_squareroot) +- [Aggregation operations](#aggregation-operations) + - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys) + - [`bls_aggregate_signatures`](#bls_aggregate_signatures) +- [Signature verification](#signature-verification) + - [`bls_verify`](#bls_verify) + - [`bls_verify_multiple`](#bls_verify_multiple) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Curve parameters diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index fca21994f7..e7c62d342b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -4,119 +4,123 @@ ## Table of contents <!-- TOC --> - -- [Ethereum 2.0 Phase 0 -- The Beacon Chain](#ethereum-20-phase-0----the-beacon-chain) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [Notation](#notation) - - [Custom types](#custom-types) - - [Constants](#constants) - - [Configuration](#configuration) - - [Misc](#misc) - - [Gwei values](#gwei-values) - - [Initial values](#initial-values) - - [Time parameters](#time-parameters) - - [State list lengths](#state-list-lengths) - - [Rewards and penalties](#rewards-and-penalties) - - [Max operations per block](#max-operations-per-block) - - [Domain types](#domain-types) - - [Containers](#containers) - - [Misc dependencies](#misc-dependencies) - - [`Fork`](#fork) - - [`Checkpoint`](#checkpoint) - - [`Validator`](#validator) - - [`AttestationData`](#attestationdata) - - [`IndexedAttestation`](#indexedattestation) - - [`PendingAttestation`](#pendingattestation) - - [`Eth1Data`](#eth1data) - - [`HistoricalBatch`](#historicalbatch) - - [`DepositMessage`](#depositmessage) - - [`DepositData`](#depositdata) - - [`BeaconBlockHeader`](#beaconblockheader) - - [Beacon operations](#beacon-operations) - 
- [`ProposerSlashing`](#proposerslashing) - - [`AttesterSlashing`](#attesterslashing) - - [`Attestation`](#attestation) - - [`Deposit`](#deposit) - - [`VoluntaryExit`](#voluntaryexit) - - [Beacon blocks](#beacon-blocks) - - [`BeaconBlockBody`](#beaconblockbody) - - [`BeaconBlock`](#beaconblock) - - [Beacon state](#beacon-state) - - [`BeaconState`](#beaconstate) - - [Signed envelopes](#signed-envelopes) - - [`SignedVoluntaryExit`](#signedvoluntaryexit) - - [`SignedBeaconBlock`](#signedbeaconblock) - - [`SignedBeaconBlockHeader`](#signedbeaconblockheader) - - [Helper functions](#helper-functions) - - [Math](#math) - - [`integer_squareroot`](#integer_squareroot) - - [`xor`](#xor) - - [`int_to_bytes`](#int_to_bytes) - - [`bytes_to_int`](#bytes_to_int) - - [Crypto](#crypto) - - [`hash`](#hash) - - [`hash_tree_root`](#hash_tree_root) - - [`bls_verify`](#bls_verify) - - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys) - - [Predicates](#predicates) - - [`is_active_validator`](#is_active_validator) - - [`is_slashable_validator`](#is_slashable_validator) - - [`is_slashable_attestation_data`](#is_slashable_attestation_data) - - [`is_valid_indexed_attestation`](#is_valid_indexed_attestation) - - [`is_valid_merkle_branch`](#is_valid_merkle_branch) - - [Misc](#misc-1) - - [`compute_shuffled_index`](#compute_shuffled_index) - - [`compute_proposer_index`](#compute_proposer_index) - - [`compute_committee`](#compute_committee) - - [`compute_epoch_at_slot`](#compute_epoch_at_slot) - - [`compute_start_slot_at_epoch`](#compute_start_slot_at_epoch) - - [`compute_activation_exit_epoch`](#compute_activation_exit_epoch) - - [`compute_domain`](#compute_domain) - - [Beacon state accessors](#beacon-state-accessors) - - [`get_current_epoch`](#get_current_epoch) - - [`get_previous_epoch`](#get_previous_epoch) - - [`get_block_root`](#get_block_root) - - [`get_block_root_at_slot`](#get_block_root_at_slot) - - [`get_randao_mix`](#get_randao_mix) - - [`get_active_validator_indices`](#get_active_validator_indices) - - [`get_validator_churn_limit`](#get_validator_churn_limit) - - [`get_seed`](#get_seed) - - [`get_committee_count_at_slot`](#get_committee_count_at_slot) - - [`get_beacon_committee`](#get_beacon_committee) - - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - - [`get_total_balance`](#get_total_balance) - - [`get_total_active_balance`](#get_total_active_balance) - - [`get_domain`](#get_domain) - - [`get_indexed_attestation`](#get_indexed_attestation) - - [`get_attesting_indices`](#get_attesting_indices) - - [Beacon state mutators](#beacon-state-mutators) - - [`increase_balance`](#increase_balance) - - [`decrease_balance`](#decrease_balance) - - [`initiate_validator_exit`](#initiate_validator_exit) - - [`slash_validator`](#slash_validator) - - [Genesis](#genesis) - - [Genesis state](#genesis-state) - - [Genesis block](#genesis-block) - - [Beacon chain state transition function](#beacon-chain-state-transition-function) - - [Epoch processing](#epoch-processing) - - [Helper functions](#helper-functions-1) - - [Justification and finalization](#justification-and-finalization) - - [Rewards and penalties](#rewards-and-penalties-1) - - [Registry updates](#registry-updates) - - [Slashings](#slashings) - - [Final updates](#final-updates) - - [Block processing](#block-processing) - - [Block header](#block-header) - - [RANDAO](#randao) - - [Eth1 data](#eth1-data) - - [Operations](#operations) - - [Proposer slashings](#proposer-slashings) - - [Attester slashings](#attester-slashings) - - 
[Attestations](#attestations) - - [Deposits](#deposits) - - [Voluntary exits](#voluntary-exits) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Introduction](#introduction) +- [Notation](#notation) +- [Custom types](#custom-types) +- [Constants](#constants) +- [Configuration](#configuration) + - [Misc](#misc) + - [Gwei values](#gwei-values) + - [Initial values](#initial-values) + - [Time parameters](#time-parameters) + - [State list lengths](#state-list-lengths) + - [Rewards and penalties](#rewards-and-penalties) + - [Max operations per block](#max-operations-per-block) + - [Domain types](#domain-types) +- [Containers](#containers) + - [Misc dependencies](#misc-dependencies) + - [`Fork`](#fork) + - [`Checkpoint`](#checkpoint) + - [`Validator`](#validator) + - [`AttestationData`](#attestationdata) + - [`IndexedAttestation`](#indexedattestation) + - [`PendingAttestation`](#pendingattestation) + - [`Eth1Data`](#eth1data) + - [`HistoricalBatch`](#historicalbatch) + - [`DepositMessage`](#depositmessage) + - [`DepositData`](#depositdata) + - [`BeaconBlockHeader`](#beaconblockheader) + - [Beacon operations](#beacon-operations) + - [`ProposerSlashing`](#proposerslashing) + - [`AttesterSlashing`](#attesterslashing) + - [`Attestation`](#attestation) + - [`Deposit`](#deposit) + - [`VoluntaryExit`](#voluntaryexit) + - [Beacon blocks](#beacon-blocks) + - [`BeaconBlockBody`](#beaconblockbody) + - [`BeaconBlock`](#beaconblock) + - [Beacon state](#beacon-state) + - [`BeaconState`](#beaconstate) + - [Signed envelopes](#signed-envelopes) + - [`SignedVoluntaryExit`](#signedvoluntaryexit) + - [`SignedBeaconBlock`](#signedbeaconblock) + - [`SignedBeaconBlockHeader`](#signedbeaconblockheader) +- [Helper functions](#helper-functions) + - [Math](#math) + - [`integer_squareroot`](#integer_squareroot) + - [`xor`](#xor) + - [`int_to_bytes`](#int_to_bytes) + - [`bytes_to_int`](#bytes_to_int) + - [Crypto](#crypto) + - [`hash`](#hash) + - [`hash_tree_root`](#hash_tree_root) + - [`bls_verify`](#bls_verify) + - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys) + - [Predicates](#predicates) + - [`is_active_validator`](#is_active_validator) + - [`is_eligible_for_activation_queue`](#is_eligible_for_activation_queue) + - [`is_eligible_for_activation`](#is_eligible_for_activation) + - [`is_slashable_validator`](#is_slashable_validator) + - [`is_slashable_attestation_data`](#is_slashable_attestation_data) + - [`is_valid_indexed_attestation`](#is_valid_indexed_attestation) + - [`is_valid_merkle_branch`](#is_valid_merkle_branch) + - [Misc](#misc-1) + - [`compute_shuffled_index`](#compute_shuffled_index) + - [`compute_proposer_index`](#compute_proposer_index) + - [`compute_committee`](#compute_committee) + - [`compute_epoch_at_slot`](#compute_epoch_at_slot) + - [`compute_start_slot_at_epoch`](#compute_start_slot_at_epoch) + - [`compute_activation_exit_epoch`](#compute_activation_exit_epoch) + - [`compute_domain`](#compute_domain) + - [Beacon state accessors](#beacon-state-accessors) + - [`get_current_epoch`](#get_current_epoch) + - [`get_previous_epoch`](#get_previous_epoch) + - [`get_block_root`](#get_block_root) + - [`get_block_root_at_slot`](#get_block_root_at_slot) + - [`get_randao_mix`](#get_randao_mix) + - [`get_active_validator_indices`](#get_active_validator_indices) + - [`get_validator_churn_limit`](#get_validator_churn_limit) + - [`get_seed`](#get_seed) + - 
[`get_committee_count_at_slot`](#get_committee_count_at_slot) + - [`get_beacon_committee`](#get_beacon_committee) + - [`get_beacon_proposer_index`](#get_beacon_proposer_index) + - [`get_total_balance`](#get_total_balance) + - [`get_total_active_balance`](#get_total_active_balance) + - [`get_domain`](#get_domain) + - [`get_indexed_attestation`](#get_indexed_attestation) + - [`get_attesting_indices`](#get_attesting_indices) + - [Beacon state mutators](#beacon-state-mutators) + - [`increase_balance`](#increase_balance) + - [`decrease_balance`](#decrease_balance) + - [`initiate_validator_exit`](#initiate_validator_exit) + - [`slash_validator`](#slash_validator) +- [Genesis](#genesis) + - [Genesis state](#genesis-state) + - [Genesis block](#genesis-block) +- [Beacon chain state transition function](#beacon-chain-state-transition-function) + - [Epoch processing](#epoch-processing) + - [Helper functions](#helper-functions-1) + - [Justification and finalization](#justification-and-finalization) + - [Rewards and penalties](#rewards-and-penalties-1) + - [Registry updates](#registry-updates) + - [Slashings](#slashings) + - [Final updates](#final-updates) + - [Block processing](#block-processing) + - [Block header](#block-header) + - [RANDAO](#randao) + - [Eth1 data](#eth1-data) + - [Operations](#operations) + - [Proposer slashings](#proposer-slashings) + - [Attester slashings](#attester-slashings) + - [Attestations](#attestations) + - [Deposits](#deposits) + - [Voluntary exits](#voluntary-exits) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Introduction @@ -484,7 +488,7 @@ class BeaconState(Container): ### Signed envelopes -Some messages in the protocol are wrapped in an envelop to better facilitate adding/pruning the signature and to `hash_tree_root` the `message` separate from the signature. +Some messages in the protocol are wrapped in an envelope to better facilitate adding/pruning the signature and to `hash_tree_root` the `message` separate from the signature. #### `SignedVoluntaryExit` @@ -591,6 +595,34 @@ def is_active_validator(validator: Validator, epoch: Epoch) -> bool: return validator.activation_epoch <= epoch < validator.exit_epoch ``` +#### `is_eligible_for_activation_queue` + +```python +def is_eligible_for_activation_queue(validator: Validator) -> bool: + """ + Check if ``validator`` is eligible to be placed into the activation queue. + """ + return ( + validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH + and validator.effective_balance == MAX_EFFECTIVE_BALANCE + ) +``` + +#### `is_eligible_for_activation` + +```python +def is_eligible_for_activation(state: BeaconState, validator: Validator) -> bool: + """ + Check if ``validator`` is eligible for activation. 
+ """ + return ( + # Placement in queue is finalized + validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch + # Has not yet been activated + and validator.activation_epoch == FAR_FUTURE_EPOCH + ) +``` + #### `is_slashable_validator` ```python @@ -628,8 +660,8 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe # Verify max number of indices if not len(indices) <= MAX_VALIDATORS_PER_COMMITTEE: return False - # Verify indices are sorted - if not indices == sorted(indices): + # Verify indices are sorted and unique + if not indices == sorted(set(indices)): return False # Verify aggregate signature if not bls_verify( @@ -1302,26 +1334,22 @@ def process_rewards_and_penalties(state: BeaconState) -> None: def process_registry_updates(state: BeaconState) -> None: # Process activation eligibility and ejections for index, validator in enumerate(state.validators): - if ( - validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH - and validator.effective_balance == MAX_EFFECTIVE_BALANCE - ): - validator.activation_eligibility_epoch = get_current_epoch(state) + if is_eligible_for_activation_queue(validator): + validator.activation_eligibility_epoch = get_current_epoch(state) + 1 if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE: initiate_validator_exit(state, ValidatorIndex(index)) - # Queue validators eligible for activation and not dequeued for activation prior to finalized epoch + # Queue validators eligible for activation and not yet dequeued for activation activation_queue = sorted([ index for index, validator in enumerate(state.validators) - if validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH - and validator.activation_epoch >= compute_activation_exit_epoch(state.finalized_checkpoint.epoch) - ], key=lambda index: state.validators[index].activation_eligibility_epoch) - # Dequeued validators for activation up to churn limit (without resetting activation epoch) + if is_eligible_for_activation(state, validator) + # Order by the sequence of activation_eligibility_epoch setting and then index + ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index)) + # Dequeued validators for activation up to churn limit for index in activation_queue[:get_validator_churn_limit(state)]: validator = state.validators[index] - if validator.activation_epoch == FAR_FUTURE_EPOCH: - validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) + validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) ``` #### Slashings @@ -1482,6 +1510,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data assert data.index < get_committee_count_at_slot(state, data.slot) assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) + assert data.target.epoch == compute_epoch_at_slot(data.slot) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH committee = get_beacon_committee(state, data.slot, data.index) diff --git a/specs/core/0_deposit-contract.md b/specs/core/0_deposit-contract.md index 06962594ed..c9f3663309 100644 --- a/specs/core/0_deposit-contract.md +++ b/specs/core/0_deposit-contract.md @@ -4,19 +4,21 @@ ## Table of contents <!-- TOC --> +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> -- [Ethereum 2.0 Phase 0 -- 
Deposit Contract](#ethereum-20-phase-0----deposit-contract) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [Constants](#constants) - - [Contract](#contract) - - [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract) - - [`deposit` function](#deposit-function) - - [Deposit amount](#deposit-amount) - - [Withdrawal credentials](#withdrawal-credentials) - - [`DepositEvent` log](#depositevent-log) - - [Vyper code](#vyper-code) +- [Introduction](#introduction) +- [Constants](#constants) + - [Contract](#contract) +- [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract) + - [`deposit` function](#deposit-function) + - [Deposit amount](#deposit-amount) + - [Withdrawal credentials](#withdrawal-credentials) + - [`DepositEvent` log](#depositevent-log) +- [Vyper code](#vyper-code) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Introduction diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md index 85576ee56c..34430d092b 100644 --- a/specs/core/0_fork-choice.md +++ b/specs/core/0_fork-choice.md @@ -4,23 +4,32 @@ ## Table of contents <!-- TOC --> - -- [Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice](#ethereum-20-phase-0----beacon-chain-fork-choice) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [Fork choice](#fork-choice) - - [Helpers](#helpers) - - [`LatestMessage`](#latestmessage) - - [`Store`](#store) - - [`get_genesis_store`](#get_genesis_store) - - [`get_ancestor`](#get_ancestor) - - [`get_latest_attesting_balance`](#get_latest_attesting_balance) - - [`get_head`](#get_head) - - [Handlers](#handlers) - - [`on_tick`](#on_tick) - - [`on_block`](#on_block) - - [`on_attestation`](#on_attestation) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Introduction](#introduction) +- [Fork choice](#fork-choice) + - [Configuration](#configuration) + - [Helpers](#helpers) + - [`LatestMessage`](#latestmessage) + - [`Store`](#store) + - [`get_genesis_store`](#get_genesis_store) + - [`get_slots_since_genesis`](#get_slots_since_genesis) + - [`get_current_slot`](#get_current_slot) + - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start) + - [`get_ancestor`](#get_ancestor) + - [`get_latest_attesting_balance`](#get_latest_attesting_balance) + - [`filter_block_tree`](#filter_block_tree) + - [`get_filtered_block_tree`](#get_filtered_block_tree) + - [`get_head`](#get_head) + - [`should_update_justified_checkpoint`](#should_update_justified_checkpoint) + - [Handlers](#handlers) + - [`on_tick`](#on_tick) + - [`on_block`](#on_block) + - [`on_attestation`](#on_attestation) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Introduction @@ -96,11 +105,18 @@ def get_genesis_store(genesis_state: BeaconState) -> Store: ) ``` +#### `get_slots_since_genesis` + +```python +def get_slots_since_genesis(store: Store) -> int: + return (store.time - store.genesis_time) // SECONDS_PER_SLOT +``` + #### `get_current_slot` ```python def get_current_slot(store: Store) -> Slot: - return Slot((store.time - store.genesis_time) // SECONDS_PER_SLOT) + return Slot(GENESIS_SLOT + get_slots_since_genesis(store)) ``` #### `compute_slots_since_epoch_start` @@ -136,17 +152,72 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei: )) ``` +#### `filter_block_tree` + +```python +def filter_block_tree(store: 
Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool: + block = store.blocks[block_root] + children = [ + root for root in store.blocks.keys() + if store.blocks[root].parent_root == block_root + ] + + # If any children branches contain expected finalized/justified checkpoints, + # add to filtered block-tree and signal viability to parent. + if any(children): + filter_block_tree_result = [filter_block_tree(store, child, blocks) for child in children] + if any(filter_block_tree_result): + blocks[block_root] = block + return True + return False + + # If leaf block, check finalized/justified checkpoints as matching latest. + head_state = store.block_states[block_root] + + correct_justified = ( + store.justified_checkpoint.epoch == GENESIS_EPOCH + or head_state.current_justified_checkpoint == store.justified_checkpoint + ) + correct_finalized = ( + store.finalized_checkpoint.epoch == GENESIS_EPOCH + or head_state.finalized_checkpoint == store.finalized_checkpoint + ) + # If expected finalized/justified, add to viable block-tree and signal viability to parent. + if correct_justified and correct_finalized: + blocks[block_root] = block + return True + + # Otherwise, branch not viable + return False +``` + +#### `get_filtered_block_tree` + +```python +def get_filtered_block_tree(store: Store) -> Dict[Root, BeaconBlock]: + """ + Retrieve a filtered block true from ``store``, only returning branches + whose leaf state's justified/finalized info agrees with that in ``store``. + """ + base = store.justified_checkpoint.root + blocks: Dict[Root, BeaconBlock] = {} + filter_block_tree(store, base, blocks) + return blocks +``` + #### `get_head` ```python def get_head(store: Store) -> Root: + # Get filtered block tree that only includes viable branches + blocks = get_filtered_block_tree(store) # Execute the LMD-GHOST fork choice head = store.justified_checkpoint.root justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch) while True: children = [ - root for root in store.blocks.keys() - if store.blocks[root].parent_root == head and store.blocks[root].slot > justified_slot + root for root in blocks.keys() + if blocks[root].parent_root == head and blocks[root].slot > justified_slot ] if len(children) == 0: return head @@ -172,8 +243,8 @@ def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: C if new_justified_block.slot <= compute_start_slot_at_epoch(store.justified_checkpoint.epoch): return False if not ( - get_ancestor(store, new_justified_checkpoint.root, store.blocks[store.justified_checkpoint.root].slot) == - store.justified_checkpoint.root + get_ancestor(store, new_justified_checkpoint.root, store.blocks[store.justified_checkpoint.root].slot) + == store.justified_checkpoint.root ): return False @@ -209,7 +280,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: assert block.parent_root in store.block_states pre_state = store.block_states[block.parent_root].copy() # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past. 
- assert store.time >= pre_state.genesis_time + block.slot * SECONDS_PER_SLOT + assert get_current_slot(store) >= block.slot # Add new block to the store store.blocks[hash_tree_root(block)] = block # Check block is a descendant of the finalized block @@ -226,7 +297,8 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Update justified checkpoint if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - store.best_justified_checkpoint = state.current_justified_checkpoint + if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: + store.best_justified_checkpoint = state.current_justified_checkpoint if should_update_justified_checkpoint(store, state.current_justified_checkpoint): store.justified_checkpoint = state.current_justified_checkpoint @@ -252,12 +324,13 @@ def on_attestation(store: Store, attestation: Attestation) -> None: # Use GENESIS_EPOCH for previous when genesis to avoid underflow previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH assert target.epoch in [current_epoch, previous_epoch] + assert target.epoch == compute_epoch_at_slot(attestation.data.slot) # Attestations target be for a known block. If target block is unknown, delay consideration until the block is found assert target.root in store.blocks # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives base_state = store.block_states[target.root].copy() - assert store.time >= base_state.genesis_time + compute_start_slot_at_epoch(target.epoch) * SECONDS_PER_SLOT + assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch) # Attestations must be for a known block. If block is unknown, delay consideration until the block is found assert attestation.data.beacon_block_root in store.blocks @@ -272,7 +345,7 @@ def on_attestation(store: Store, attestation: Attestation) -> None: # Attestations can only affect the fork choice of subsequent slots. # Delay consideration in the fork choice until their slot is in the past. 
- assert store.time >= (attestation.data.slot + 1) * SECONDS_PER_SLOT + assert get_current_slot(store) >= attestation.data.slot + 1 # Get state at the `target` to validate attestation and calculate the committees indexed_attestation = get_indexed_attestation(target_state, attestation) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index d99aca0ef2..b27c72b347 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -3,29 +3,31 @@ ## Table of contents <!-- TOC --> - -- [Phase 1 miscellaneous beacon chain changes](#phase-1-miscellaneous-beacon-chain-changes) - - [Table of contents](#table-of-contents) - - [Configuration](#configuration) - - [Containers](#containers) - - [`CompactCommittee`](#compactcommittee) - - [`ShardReceiptDelta`](#shardreceiptdelta) - - [`ShardReceiptProof`](#shardreceiptproof) - - [Helper functions](#helper-functions) - - [`pack_compact_validator`](#pack_compact_validator) - - [`unpack_compact_validator`](#unpack_compact_validator) - - [`committee_to_compact_committee`](#committee_to_compact_committee) - - [`verify_merkle_proof`](#verify_merkle_proof) - - [`compute_historical_state_generalized_index`](#compute_historical_state_generalized_index) - - [`get_generalized_index_of_crosslink_header`](#get_generalized_index_of_crosslink_header) - - [`process_shard_receipt_proof`](#process_shard_receipt_proof) - - [Changes](#changes) - - [Phase 0 container updates](#phase-0-container-updates) - - [`BeaconState`](#beaconstate) - - [`BeaconBlockBody`](#beaconblockbody) - - [Persistent committees](#persistent-committees) - - [Shard receipt processing](#shard-receipt-processing) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Configuration](#configuration) +- [Containers](#containers) + - [`CompactCommittee`](#compactcommittee) + - [`ShardReceiptDelta`](#shardreceiptdelta) + - [`ShardReceiptProof`](#shardreceiptproof) +- [Helper functions](#helper-functions) + - [`pack_compact_validator`](#pack_compact_validator) + - [`unpack_compact_validator`](#unpack_compact_validator) + - [`committee_to_compact_committee`](#committee_to_compact_committee) + - [`verify_merkle_proof`](#verify_merkle_proof) + - [`compute_historical_state_generalized_index`](#compute_historical_state_generalized_index) + - [`get_generalized_index_of_crosslink_header`](#get_generalized_index_of_crosslink_header) + - [`process_shard_receipt_proof`](#process_shard_receipt_proof) +- [Changes](#changes) + - [Phase 0 container updates](#phase-0-container-updates) + - [`BeaconState`](#beaconstate) + - [`BeaconBlockBody`](#beaconblockbody) + - [Persistent committees](#persistent-committees) + - [Shard receipt processing](#shard-receipt-processing) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Configuration diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 946165bb70..7eb9e25265 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -5,54 +5,56 @@ ## Table of contents <!-- TOC --> - -- [Ethereum 2.0 Phase 1 -- Custody Game](#ethereum-20-phase-1----custody-game) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [Terminology](#terminology) - - [Constants](#constants) - - [Misc](#misc) - - [Custody game parameters](#custody-game-parameters) - - [Time parameters](#time-parameters) - - [Max operations per 
block](#max-operations-per-block) - - [Reward and penalty quotients](#reward-and-penalty-quotients) - - [Signature domain types](#signature-domain-types) - - [TODO PLACEHOLDER](#todo-placeholder) - - [Data structures](#data-structures) - - [Custody objects](#custody-objects) - - [`CustodyChunkChallenge`](#custodychunkchallenge) - - [`CustodyBitChallenge`](#custodybitchallenge) - - [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord) - - [`CustodyBitChallengeRecord`](#custodybitchallengerecord) - - [`CustodyResponse`](#custodyresponse) - - [New beacon operations](#new-beacon-operations) - - [`CustodyKeyReveal`](#custodykeyreveal) - - [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal) - - [Phase 0 container updates](#phase-0-container-updates) - - [`Validator`](#validator) - - [`BeaconState`](#beaconstate) - - [`BeaconBlockBody`](#beaconblockbody) - - [Helpers](#helpers) - - [`ceillog2`](#ceillog2) - - [`is_valid_merkle_branch_with_mixin`](#is_valid_merkle_branch_with_mixin) - - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count) - - [`legendre_bit`](#legendre_bit) - - [`custody_subchunkify`](#custody_subchunkify) - - [`get_custody_chunk_bit`](#get_custody_chunk_bit) - - [`get_chunk_bits_root`](#get_chunk_bits_root) - - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period) - - [`get_custody_period_for_validator`](#get_custody_period_for_validator) - - [`replace_empty_or_append`](#replace_empty_or_append) - - [Per-block processing](#per-block-processing) - - [Operations](#operations) - - [Custody key reveals](#custody-key-reveals) - - [Early derived secret reveals](#early-derived-secret-reveals) - - [Chunk challenges](#chunk-challenges) - - [Bit challenges](#bit-challenges) - - [Custody responses](#custody-responses) - - [Per-epoch processing](#per-epoch-processing) - - [Handling of custody-related deadlines](#handling-of-custody-related-deadlines) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Introduction](#introduction) +- [Terminology](#terminology) +- [Constants](#constants) + - [Misc](#misc) + - [Custody game parameters](#custody-game-parameters) + - [Time parameters](#time-parameters) + - [Max operations per block](#max-operations-per-block) + - [Reward and penalty quotients](#reward-and-penalty-quotients) + - [Signature domain types](#signature-domain-types) + - [TODO PLACEHOLDER](#todo-placeholder) +- [Data structures](#data-structures) + - [Custody objects](#custody-objects) + - [`CustodyChunkChallenge`](#custodychunkchallenge) + - [`CustodyBitChallenge`](#custodybitchallenge) + - [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord) + - [`CustodyBitChallengeRecord`](#custodybitchallengerecord) + - [`CustodyResponse`](#custodyresponse) + - [New beacon operations](#new-beacon-operations) + - [`CustodyKeyReveal`](#custodykeyreveal) + - [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal) + - [Phase 0 container updates](#phase-0-container-updates) + - [`Validator`](#validator) + - [`BeaconState`](#beaconstate) + - [`BeaconBlockBody`](#beaconblockbody) +- [Helpers](#helpers) + - [`ceillog2`](#ceillog2) + - [`is_valid_merkle_branch_with_mixin`](#is_valid_merkle_branch_with_mixin) + - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count) + - [`legendre_bit`](#legendre_bit) + - [`custody_subchunkify`](#custody_subchunkify) + - [`get_custody_chunk_bit`](#get_custody_chunk_bit) + - 
[`get_chunk_bits_root`](#get_chunk_bits_root) + - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period) + - [`get_custody_period_for_validator`](#get_custody_period_for_validator) + - [`replace_empty_or_append`](#replace_empty_or_append) +- [Per-block processing](#per-block-processing) + - [Operations](#operations) + - [Custody key reveals](#custody-key-reveals) + - [Early derived secret reveals](#early-derived-secret-reveals) + - [Chunk challenges](#chunk-challenges) + - [Bit challenges](#bit-challenges) + - [Custody responses](#custody-responses) +- [Per-epoch processing](#per-epoch-processing) + - [Handling of custody-related deadlines](#handling-of-custody-related-deadlines) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Introduction diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 6fce90626b..d5964fe7d4 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -5,45 +5,47 @@ ## Table of contents <!-- TOC --> - -- [Ethereum 2.0 Phase 1 -- Shard Data Chains](#ethereum-20-phase-1----shard-data-chains) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [Custom types](#custom-types) - - [Configuration](#configuration) - - [Misc](#misc) - - [Initial values](#initial-values) - - [Time parameters](#time-parameters) - - [State list lengths](#state-list-lengths) - - [Rewards and penalties](#rewards-and-penalties) - - [Signature domain types](#signature-domain-types) - - [Containers](#containers) - - [`Crosslink`](#crosslink) - - [`ShardBlock`](#shardblock) - - [`ShardBlockHeader`](#shardblockheader) - - [`ShardState`](#shardstate) - - [`ShardAttestationData`](#shardattestationdata) - - [Helper functions](#helper-functions) - - [Misc](#misc-1) - - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) - - [`compute_shard_period_start_epoch`](#compute_shard_period_start_epoch) - - [Beacon state accessors](#beacon-state-accessors) - - [`get_period_committee`](#get_period_committee) - - [`get_shard_committee`](#get_shard_committee) - - [`get_shard_proposer_index`](#get_shard_proposer_index) - - [Shard state mutators](#shard-state-mutators) - - [`process_delta`](#process_delta) - - [Genesis](#genesis) - - [`get_genesis_shard_state`](#get_genesis_shard_state) - - [`get_genesis_shard_block`](#get_genesis_shard_block) - - [Shard state transition function](#shard-state-transition-function) - - [Period processing](#period-processing) - - [Block processing](#block-processing) - - [Block header](#block-header) - - [Attestations](#attestations) - - [Block body](#block-body) - - [Shard fork choice rule](#shard-fork-choice-rule) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Introduction](#introduction) +- [Custom types](#custom-types) +- [Configuration](#configuration) + - [Misc](#misc) + - [Initial values](#initial-values) + - [Time parameters](#time-parameters) + - [State list lengths](#state-list-lengths) + - [Rewards and penalties](#rewards-and-penalties) + - [Signature domain types](#signature-domain-types) +- [Containers](#containers) + - [`Crosslink`](#crosslink) + - [`ShardBlock`](#shardblock) + - [`ShardBlockHeader`](#shardblockheader) + - [`ShardState`](#shardstate) + - [`ShardAttestationData`](#shardattestationdata) +- [Helper functions](#helper-functions) + - [Misc](#misc-1) + - 
[`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) + - [`compute_shard_period_start_epoch`](#compute_shard_period_start_epoch) + - [Beacon state accessors](#beacon-state-accessors) + - [`get_period_committee`](#get_period_committee) + - [`get_shard_committee`](#get_shard_committee) + - [`get_shard_proposer_index`](#get_shard_proposer_index) + - [Shard state mutators](#shard-state-mutators) + - [`process_delta`](#process_delta) +- [Genesis](#genesis) + - [`get_genesis_shard_state`](#get_genesis_shard_state) + - [`get_genesis_shard_block`](#get_genesis_shard_block) +- [Shard state transition function](#shard-state-transition-function) + - [Period processing](#period-processing) + - [Block processing](#block-processing) + - [Block header](#block-header) + - [Attestations](#attestations) + - [Block body](#block-body) +- [Shard fork choice rule](#shard-fork-choice-rule) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Introduction diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index c89235cb58..3e176f2aa0 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -4,21 +4,23 @@ ## Table of contents <!-- TOC --> - -- [Merkle proof formats](#merkle-proof-formats) - - [Table of contents](#table-of-contents) - - [Helper functions](#helper-functions) - - [Generalized Merkle tree index](#generalized-merkle-tree-index) - - [SSZ object to index](#ssz-object-to-index) - - [Helpers for generalized indices](#helpers-for-generalized-indices) - - [`concat_generalized_indices`](#concat_generalized_indices) - - [`get_generalized_index_length`](#get_generalized_index_length) - - [`get_generalized_index_bit`](#get_generalized_index_bit) - - [`generalized_index_sibling`](#generalized_index_sibling) - - [`generalized_index_child`](#generalized_index_child) - - [`generalized_index_parent`](#generalized_index_parent) - - [Merkle multiproofs](#merkle-multiproofs) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Helper functions](#helper-functions) +- [Generalized Merkle tree index](#generalized-merkle-tree-index) +- [SSZ object to index](#ssz-object-to-index) + - [Helpers for generalized indices](#helpers-for-generalized-indices) + - [`concat_generalized_indices`](#concat_generalized_indices) + - [`get_generalized_index_length`](#get_generalized_index_length) + - [`get_generalized_index_bit`](#get_generalized_index_bit) + - [`generalized_index_sibling`](#generalized_index_sibling) + - [`generalized_index_child`](#generalized_index_child) + - [`generalized_index_parent`](#generalized_index_parent) +- [Merkle multiproofs](#merkle-multiproofs) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Helper functions diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 00edb990d7..05180516bf 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -5,20 +5,22 @@ ## Table of contents <!-- TOC --> - -- [Minimal Light Client Design](#minimal-light-client-design) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [Custom types](#custom-types) - - [Constants](#constants) - - [Containers](#containers) - - [`LightClientUpdate`](#lightclientupdate) - - [Helpers](#helpers) - - [`LightClientMemory`](#lightclientmemory) - - 
[`get_persistent_committee_pubkeys_and_balances`](#get_persistent_committee_pubkeys_and_balances) - - [Light client state updates](#light-client-state-updates) - - [Data overhead](#data-overhead) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Introduction](#introduction) +- [Custom types](#custom-types) +- [Constants](#constants) +- [Containers](#containers) + - [`LightClientUpdate`](#lightclientupdate) +- [Helpers](#helpers) + - [`LightClientMemory`](#lightclientmemory) + - [`get_persistent_committee_pubkeys_and_balances`](#get_persistent_committee_pubkeys_and_balances) +- [Light client state updates](#light-client-state-updates) +- [Data overhead](#data-overhead) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Introduction diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 414dc8cde0..84539713dd 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -18,23 +18,88 @@ It consists of four main sections: - [Network fundamentals](#network-fundamentals) - [Transport](#transport) + - [Interop](#interop) + - [Mainnet](#mainnet) - [Encryption and identification](#encryption-and-identification) - - [Protocol negotiation](#protocol-negotiation) + - [Interop](#interop-1) + - [Mainnet](#mainnet-1) + - [Protocol Negotiation](#protocol-negotiation) + - [Interop](#interop-2) + - [Mainnet](#mainnet-2) - [Multiplexing](#multiplexing) - [Eth2 network interaction domains](#eth2-network-interaction-domains) - [Configuration](#configuration) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) + - [Topics and messages](#topics-and-messages) + - [Global topics](#global-topics) + - [Attestation subnets](#attestation-subnets) + - [Interop](#interop-3) + - [Mainnet](#mainnet-3) + - [Encodings](#encodings) + - [Interop](#interop-4) + - [Mainnet](#mainnet-4) - [The Req/Resp domain](#the-reqresp-domain) + - [Protocol identification](#protocol-identification) + - [Req/Resp interaction](#reqresp-interaction) + - [Requesting side](#requesting-side) + - [Responding side](#responding-side) + - [Encoding strategies](#encoding-strategies) + - [SSZ-encoding strategy (with or without Snappy)](#ssz-encoding-strategy-with-or-without-snappy) + - [Messages](#messages) + - [Status](#status) + - [Goodbye](#goodbye) + - [BeaconBlocksByRange](#beaconblocksbyrange) + - [BeaconBlocksByRoot](#beaconblocksbyroot) - [The discovery domain: discv5](#the-discovery-domain-discv5) + - [Integration into libp2p stacks](#integration-into-libp2p-stacks) + - [ENR structure](#enr-structure) + - [Interop](#interop-5) + - [Mainnet](#mainnet-5) + - [Topic advertisement](#topic-advertisement) + - [Interop](#interop-6) + - [Mainnet](#mainnet-6) - [Design decision rationale](#design-decision-rationale) - [Transport](#transport-1) + - [Why are we defining specific transports?](#why-are-we-defining-specific-transports) + - [Can clients support other transports/handshakes than the ones mandated by the spec?](#can-clients-support-other-transportshandshakes-than-the-ones-mandated-by-the-spec) + - [What are the advantages of using TCP/QUIC/Websockets?](#what-are-the-advantages-of-using-tcpquicwebsockets) + - [Why do we not just support a single transport?](#why-do-we-not-just-support-a-single-transport) + - [Why are we not using QUIC for mainnet from the start?](#why-are-we-not-using-quic-for-mainnet-from-the-start) 
- [Multiplexing](#multiplexing-1) - - [Protocol negotiation](#protocol-negotiation-1) + - [Why are we using mplex/yamux?](#why-are-we-using-mplexyamux) + - [Protocol Negotiation](#protocol-negotiation-1) + - [When is multiselect 2.0 due and why are we using it for mainnet?](#when-is-multiselect-20-due-and-why-are-we-using-it-for-mainnet) + - [What is the difference between connection-level and stream-level protocol negotiation?](#what-is-the-difference-between-connection-level-and-stream-level-protocol-negotiation) - [Encryption](#encryption) + - [Why are we using SecIO for interop? Why not for mainnet?](#why-are-we-using-secio-for-interop-why-not-for-mainnet) + - [Why are we using Noise/TLS 1.3 for mainnet?](#why-are-we-using-noisetls-13-for-mainnet) + - [Why are we using encryption at all?](#why-are-we-using-encryption-at-all) + - [Will mainnnet networking be untested when it launches?](#will-mainnnet-networking-be-untested-when-it-launches) - [Gossipsub](#gossipsub) + - [Why are we using a pub/sub algorithm for block and attestation propagation?](#why-are-we-using-a-pubsub-algorithm-for-block-and-attestation-propagation) + - [Why are we using topics to segregate encodings, yet only support one encoding?](#why-are-we-using-topics-to-segregate-encodings-yet-only-support-one-encoding) + - [How do we upgrade gossip channels (e.g. changes in encoding, compression)?](#how-do-we-upgrade-gossip-channels-eg-changes-in-encoding-compression) + - [Why must all clients use the same gossip topic instead of one negotiated between each peer pair?](#why-must-all-clients-use-the-same-gossip-topic-instead-of-one-negotiated-between-each-peer-pair) + - [Why are the topics strings and not hashes?](#why-are-the-topics-strings-and-not-hashes) + - [Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?](#why-are-there-attestation_subnet_count-attestation-subnets) + - [Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?](#why-are-attestations-limited-to-be-broadcast-on-gossip-channels-within-slots_per_epoch-slots) + - [Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s?](#why-are-aggregate-attestations-broadcast-to-the-global-topic-as-aggregateandproofs-rather-than-just-as-attestations) + - [Why are we sending entire objects in the pubsub and not just hashes?](#why-are-we-sending-entire-objects-in-the-pubsub-and-not-just-hashes) + - [Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc?](#should-clients-gossip-blocks-if-they-cannot-validate-the-proposer-signature-due-to-not-yet-being-synced-not-knowing-the-head-block-etc) + - [How are we going to discover peers in a gossipsub topic?](#how-are-we-going-to-discover-peers-in-a-gossipsub-topic) - [Req/Resp](#reqresp) + - [Why segregate requests into dedicated protocol IDs?](#why-segregate-requests-into-dedicated-protocol-ids) + - [Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?](#why-are-messages-length-prefixed-with-a-protobuf-varint-in-the-ssz-encoding) + - [Why do we version protocol strings with ordinals instead of semver?](#why-do-we-version-protocol-strings-with-ordinals-instead-of-semver) + - [Why is it called Req/Resp and not RPC?](#why-is-it-called-reqresp-and-not-rpc) - [Discovery](#discovery) + - [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht) + - [What is the 
difference between an ENR and a multiaddr, and why are we using ENRs?](#what-is-the-difference-between-an-enr-and-a-multiaddr-and-why-are-we-using-enrs) - [Compression/Encoding](#compressionencoding) + - [Why are we using SSZ for encoding?](#why-are-we-using-ssz-for-encoding) + - [Why are we compressing, and at which layers?](#why-are-we-compressing-and-at-which-layers) + - [Why are using Snappy for compression?](#why-are-using-snappy-for-compression) + - [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes) - [libp2p implementations matrix](#libp2p-implementations-matrix) <!-- END doctoc generated TOC please keep comment here to allow auto update --> @@ -356,7 +421,7 @@ The fields are, as seen by the client at the time of sending the message: - `head_fork_version`: The beacon_state `Fork` version. - `finalized_root`: `state.finalized_checkpoint.root` for the state corresponding to the head block. - `finalized_epoch`: `state.finalized_checkpoint.epoch` for the state corresponding to the head block. -- `head_root`: The signing root of the current head block. +- `head_root`: The hash_tree_root root of the current head block. - `head_slot`: The slot of the block corresponding to the `head_root`. The dialing client MUST send a `Status` request upon connection. @@ -567,7 +632,7 @@ Conscious of that, the libp2p community conceptualized [mplex](https://github.co Overlay multiplexers are not necessary with QUIC since the protocol provides native multiplexing, but they need to be layered atop TCP, WebSockets, and other transports that lack such support. -## Protocol negotiation +## Protocol Negotiation ### When is multiselect 2.0 due and why are we using it for mainnet? 
diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 556c31b274..860a27f012 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -4,30 +4,32 @@ ## Table of contents <!-- TOC --> - -- [SimpleSerialize (SSZ)](#simpleserialize-ssz) - - [Table of contents](#table-of-contents) - - [Constants](#constants) - - [Typing](#typing) - - [Basic types](#basic-types) - - [Composite types](#composite-types) - - [Variable-size and fixed-size](#variable-size-and-fixed-size) - - [Aliases](#aliases) - - [Default values](#default-values) - - [`is_zero`](#is_zero) - - [Illegal types](#illegal-types) - - [Serialization](#serialization) - - [`uintN`](#uintn) - - [`boolean`](#boolean) - - [`null`](#null) - - [`Bitvector[N]`](#bitvectorn) - - [`Bitlist[N]`](#bitlistn) - - [Vectors, containers, lists, unions](#vectors-containers-lists-unions) - - [Deserialization](#deserialization) - - [Merkleization](#merkleization) - - [Summaries and expansions](#summaries-and-expansions) - - [Implementations](#implementations) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Constants](#constants) +- [Typing](#typing) + - [Basic types](#basic-types) + - [Composite types](#composite-types) + - [Variable-size and fixed-size](#variable-size-and-fixed-size) + - [Aliases](#aliases) + - [Default values](#default-values) + - [`is_zero`](#is_zero) + - [Illegal types](#illegal-types) +- [Serialization](#serialization) + - [`uintN`](#uintn) + - [`boolean`](#boolean) + - [`null`](#null) + - [`Bitvector[N]`](#bitvectorn) + - [`Bitlist[N]`](#bitlistn) + - [Vectors, containers, lists, unions](#vectors-containers-lists-unions) +- [Deserialization](#deserialization) +- [Merkleization](#merkleization) +- [Summaries and expansions](#summaries-and-expansions) +- [Implementations](#implementations) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Constants diff --git a/specs/test_formats/ssz_generic/README.md b/specs/test_formats/ssz_generic/README.md index ce0f709dde..b6faa04afd 100644 --- a/specs/test_formats/ssz_generic/README.md +++ b/specs/test_formats/ssz_generic/README.md @@ -37,7 +37,7 @@ Valid has 3 parts: `meta.yaml`, `serialized.ssz`, `value.yaml` ### `meta.yaml` -Valid ssz objects can have a hash-tree-root, and for some types also a signing-root. +Valid ssz objects can have a hash-tree-root. The expected roots are encoded into the metadata yaml: ```yaml @@ -61,7 +61,6 @@ The conditions are the same for each type: - Encoding: After encoding the given `value` object, the output should match `serialized`. - Decoding: After decoding the given `serialized` bytes, it should match the `value` object. - Hash-tree-root: the root should match the root declared in the metadata. -- Signing-root: if present in metadata, the signing root of the object should match the container. 
## `invalid` diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 3b90350e39..76bcc3b7d8 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -5,61 +5,64 @@ ## Table of contents <!-- TOC --> - -- [Ethereum 2.0 Phase 0 -- Honest Validator](#ethereum-20-phase-0----honest-validator) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [Prerequisites](#prerequisites) - - [Constants](#constants) - - [Misc](#misc) - - [Becoming a validator](#becoming-a-validator) - - [Initialization](#initialization) - - [BLS public key](#bls-public-key) - - [BLS withdrawal key](#bls-withdrawal-key) - - [Submit deposit](#submit-deposit) - - [Process deposit](#process-deposit) - - [Validator index](#validator-index) - - [Activation](#activation) - - [Validator assignments](#validator-assignments) - - [Lookahead](#lookahead) - - [Beacon chain responsibilities](#beacon-chain-responsibilities) - - [Block proposal](#block-proposal) - - [Block header](#block-header) - - [Slot](#slot) - - [Parent root](#parent-root) - - [State root](#state-root) - - [Randao reveal](#randao-reveal) - - [Eth1 Data](#eth1-data) - - [Signature](#signature) - - [Block body](#block-body) - - [Proposer slashings](#proposer-slashings) - - [Attester slashings](#attester-slashings) - - [Attestations](#attestations) - - [Deposits](#deposits) - - [Voluntary exits](#voluntary-exits) - - [Attesting](#attesting) - - [Attestation data](#attestation-data) - - [General](#general) - - [LMD GHOST vote](#lmd-ghost-vote) - - [FFG vote](#ffg-vote) - - [Construct attestation](#construct-attestation) - - [Data](#data) - - [Aggregation bits](#aggregation-bits) - - [Aggregate signature](#aggregate-signature) - - [Broadcast attestation](#broadcast-attestation) - - [Attestation aggregation](#attestation-aggregation) - - [Aggregation selection](#aggregation-selection) - - [Construct aggregate](#construct-aggregate) - - [Data](#data-1) - - [Aggregation bits](#aggregation-bits-1) - - [Aggregate signature](#aggregate-signature-1) - - [Broadcast aggregate](#broadcast-aggregate) - - [`AggregateAndProof`](#aggregateandproof) - - [Phase 0 attestation subnet stability](#phase-0-attestation-subnet-stability) - - [How to avoid slashing](#how-to-avoid-slashing) - - [Proposer slashing](#proposer-slashing) - - [Attester slashing](#attester-slashing) - +<!-- START doctoc generated TOC please keep comment here to allow auto update --> +<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> + + +- [Introduction](#introduction) +- [Prerequisites](#prerequisites) +- [Constants](#constants) + - [Misc](#misc) +- [Becoming a validator](#becoming-a-validator) + - [Initialization](#initialization) + - [BLS public key](#bls-public-key) + - [BLS withdrawal key](#bls-withdrawal-key) + - [Submit deposit](#submit-deposit) + - [Process deposit](#process-deposit) + - [Validator index](#validator-index) + - [Activation](#activation) +- [Validator assignments](#validator-assignments) + - [Lookahead](#lookahead) +- [Beacon chain responsibilities](#beacon-chain-responsibilities) + - [Block proposal](#block-proposal) + - [Preparing for a `BeaconBlock`](#preparing-for-a-beaconblock) + - [Slot](#slot) + - [Parent root](#parent-root) + - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) + - [Randao reveal](#randao-reveal) + - [Eth1 Data](#eth1-data) + - [Proposer slashings](#proposer-slashings) + - [Attester 
slashings](#attester-slashings) + - [Attestations](#attestations) + - [Deposits](#deposits) + - [Voluntary exits](#voluntary-exits) + - [Packaging into a `SignedBeaconBlock`](#packaging-into-a-signedbeaconblock) + - [State root](#state-root) + - [Signature](#signature) + - [Attesting](#attesting) + - [Attestation data](#attestation-data) + - [General](#general) + - [LMD GHOST vote](#lmd-ghost-vote) + - [FFG vote](#ffg-vote) + - [Construct attestation](#construct-attestation) + - [Data](#data) + - [Aggregation bits](#aggregation-bits) + - [Aggregate signature](#aggregate-signature) + - [Broadcast attestation](#broadcast-attestation) + - [Attestation aggregation](#attestation-aggregation) + - [Aggregation selection](#aggregation-selection) + - [Construct aggregate](#construct-aggregate) + - [Data](#data-1) + - [Aggregation bits](#aggregation-bits-1) + - [Aggregate signature](#aggregate-signature-1) + - [Broadcast aggregate](#broadcast-aggregate) + - [`AggregateAndProof`](#aggregateandproof) +- [Phase 0 attestation subnet stability](#phase-0-attestation-subnet-stability) +- [How to avoid slashing](#how-to-avoid-slashing) + - [Proposer slashing](#proposer-slashing) + - [Attester slashing](#attester-slashing) + +<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- /TOC --> ## Introduction diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py index 7a125c3be2..1f412e7879 100644 --- a/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py +++ b/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py @@ -1,7 +1,11 @@ from eth2spec.test.context import with_all_phases, spec_state_test from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.block import build_empty_block_for_next_slot -from eth2spec.test.helpers.state import state_transition_and_sign_block +from eth2spec.test.helpers.state import ( + next_epoch, + next_epoch_with_attestations, + state_transition_and_sign_block, +) def add_block_to_store(spec, store, signed_block): @@ -112,3 +116,79 @@ def test_shorter_chain_but_heavier_weight(spec, state): add_attestation_to_store(spec, store, short_attestation) assert spec.get_head(store) == spec.hash_tree_root(short_block) + + +@with_all_phases +@spec_state_test +def test_filtered_block_tree(spec, state): + # Initialization + genesis_state_root = state.hash_tree_root() + store = spec.get_genesis_store(state) + genesis_block = spec.BeaconBlock(state_root=genesis_state_root) + + # transition state past initial couple of epochs + next_epoch(spec, state) + next_epoch(spec, state) + + assert spec.get_head(store) == spec.hash_tree_root(genesis_block) + + # fill in attestations for entire epoch, justifying the recent epoch + prev_state, signed_blocks, state = next_epoch_with_attestations(spec, state, True, False) + attestations = [ + attestation for signed_block in signed_blocks + for attestation in signed_block.message.body.attestations + ] + assert state.current_justified_checkpoint.epoch > prev_state.current_justified_checkpoint.epoch + + # tick time forward and add blocks and attestations to store + current_time = state.slot * spec.SECONDS_PER_SLOT + store.genesis_time + spec.on_tick(store, current_time) + for signed_block in signed_blocks: + spec.on_block(store, signed_block) + for attestation in attestations: + spec.on_attestation(store, attestation) + + assert store.justified_checkpoint == state.current_justified_checkpoint + + # the last 
block in the branch should be the head + expected_head_root = spec.hash_tree_root(signed_blocks[-1].message) + assert spec.get_head(store) == expected_head_root + + # + # create branch containing the justified block but not containing enough on + # chain votes to justify that block + # + + # build a chain without attestations off of previous justified block + non_viable_state = store.block_states[store.justified_checkpoint.root].copy() + + # ensure that next wave of votes are for future epoch + next_epoch(spec, non_viable_state) + next_epoch(spec, non_viable_state) + next_epoch(spec, non_viable_state) + assert spec.get_current_epoch(non_viable_state) > store.justified_checkpoint.epoch + + # create rogue block that will be attested to in this non-viable branch + rogue_block = build_empty_block_for_next_slot(spec, non_viable_state) + signed_rogue_block = state_transition_and_sign_block(spec, non_viable_state, rogue_block) + + # create an epoch's worth of attestations for the rogue block + next_epoch(spec, non_viable_state) + attestations = [] + for i in range(spec.SLOTS_PER_EPOCH): + slot = rogue_block.slot + i + for index in range(spec.get_committee_count_at_slot(non_viable_state, slot)): + attestation = get_valid_attestation(spec, non_viable_state, rogue_block.slot + i, index) + attestations.append(attestation) + + # tick time forward to be able to include up to the latest attestation + current_time = (attestations[-1].data.slot + 1) * spec.SECONDS_PER_SLOT + store.genesis_time + spec.on_tick(store, current_time) + + # include rogue block and associated attestations in the store + spec.on_block(store, signed_rogue_block) + for attestation in attestations: + spec.on_attestation(store, attestation) + + # ensure that get_head still returns the head from the previous branch + assert spec.get_head(store) == expected_head_root diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py index 8f1d6f74f7..d7fbc4777a 100644 --- a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py +++ b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py @@ -84,6 +84,29 @@ def test_on_attestation_past_epoch(spec, state): run_on_attestation(spec, state, store, attestation, False) +@with_all_phases +@spec_state_test +def test_on_attestation_mismatched_target_and_slot(spec, state): + store = spec.get_genesis_store(state) + spec.on_tick(store, store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH) + + block = build_empty_block_for_next_slot(spec, state) + signed_block = state_transition_and_sign_block(spec, state, block) + + # store block in store + spec.on_block(store, signed_block) + + attestation = get_valid_attestation(spec, state, slot=block.slot) + attestation.data.target.epoch += 1 + sign_attestation(spec, state, attestation) + + assert attestation.data.target.epoch == spec.GENESIS_EPOCH + 1 + assert spec.compute_epoch_at_slot(attestation.data.slot) == spec.GENESIS_EPOCH + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == spec.GENESIS_EPOCH + 1 + + run_on_attestation(spec, state, store, attestation, False) + + @with_all_phases @spec_state_test def test_on_attestation_target_not_in_store(spec, state): diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py index 4fda49664b..10d1c0011b 100644 --- a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py +++ 
b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py @@ -168,7 +168,7 @@ def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): @with_all_phases @spec_state_test -def test_on_block_outside_safe_slots_and_old_block(spec, state): +def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): # Initialization store = spec.get_genesis_store(state) time = 100 @@ -187,20 +187,30 @@ def test_on_block_outside_safe_slots_and_old_block(spec, state): just_block.slot = spec.compute_start_slot_at_epoch(store.justified_checkpoint.epoch) store.blocks[just_block.hash_tree_root()] = just_block - # Mock the justified checkpoint - just_state = store.block_states[last_block_root] - new_justified = spec.Checkpoint( - epoch=just_state.current_justified_checkpoint.epoch + 1, - root=just_block.hash_tree_root(), - ) - just_state.current_justified_checkpoint = new_justified - - block = build_empty_block_for_next_slot(spec, just_state) - signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) - + # Step time past safe slots spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.SECONDS_PER_SLOT) assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - run_on_block(spec, store, signed_block) - assert store.justified_checkpoint != new_justified - assert store.best_justified_checkpoint == new_justified + previously_justified = store.justified_checkpoint + + # Add a series of new blocks with "better" justifications + best_justified_checkpoint = spec.Checkpoint(epoch=0) + for i in range(3, 0, -1): + just_state = store.block_states[last_block_root] + new_justified = spec.Checkpoint( + epoch=previously_justified.epoch + i, + root=just_block.hash_tree_root(), + ) + if new_justified.epoch > best_justified_checkpoint.epoch: + best_justified_checkpoint = new_justified + + just_state.current_justified_checkpoint = new_justified + + block = build_empty_block_for_next_slot(spec, just_state) + signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) + + run_on_block(spec, store, signed_block) + + assert store.justified_checkpoint == previously_justified + # ensure the best from the series was stored + assert store.best_justified_checkpoint == best_justified_checkpoint diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py index f19bc66d36..d48386fd49 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ -177,6 +177,20 @@ def test_invalid_index(spec, state): yield from run_attestation_processing(spec, state, attestation, False) +@with_all_phases +@spec_state_test +def test_mismatched_target_and_slot(spec, state): + next_epoch(spec, state) + next_epoch(spec, state) + + attestation = get_valid_attestation(spec, state) + attestation.data.slot = attestation.data.slot - spec.SLOTS_PER_EPOCH + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + @with_all_phases @spec_state_test def test_old_target_epoch(spec, state): diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py index 85e807ec00..98a6e25e5f 100644 
--- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py @@ -252,6 +252,76 @@ def test_att2_bad_replaced_index(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) +@with_all_phases +@spec_state_test +@always_bls +def test_att1_duplicate_index_normal_signed(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + + indices = attester_slashing.attestation_1.attesting_indices + indices.pop(1) # remove an index, make room for the additional duplicate index. + attester_slashing.attestation_1.attesting_indices = sorted(indices) + + # sign it, the signature will be valid for a single occurence. If the transition accidentally ignores the duplicate. + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) + + indices.append(indices[0]) # add one of the indices a second time + attester_slashing.attestation_1.attesting_indices = sorted(indices) + + # it will just appear normal, unless the double index is spotted + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +@always_bls +def test_att2_duplicate_index_normal_signed(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False) + + indices = attester_slashing.attestation_2.attesting_indices + indices.pop(2) # remove an index, make room for the additional duplicate index. + attester_slashing.attestation_2.attesting_indices = sorted(indices) + + # sign it, the signature will be valid for a single occurence. If the transition accidentally ignores the duplicate. + sign_indexed_attestation(spec, state, attester_slashing.attestation_2) + + indices.append(indices[1]) # add one of the indices a second time + attester_slashing.attestation_2.attesting_indices = sorted(indices) + + # it will just appear normal, unless the double index is spotted + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +@always_bls +def test_att1_duplicate_index_double_signed(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + + indices = attester_slashing.attestation_1.attesting_indices + indices.pop(1) # remove an index, make room for the additional duplicate index. + indices.append(indices[2]) # add one of the indices a second time + attester_slashing.attestation_1.attesting_indices = sorted(indices) + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) # will have one attester signing it double + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +@always_bls +def test_att2_duplicate_index_double_signed(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False) + + indices = attester_slashing.attestation_2.attesting_indices + indices.pop(1) # remove an index, make room for the additional duplicate index. 
+ indices.append(indices[2]) # add one of the indices a second time + attester_slashing.attestation_2.attesting_indices = sorted(indices) + sign_indexed_attestation(spec, state, attester_slashing.attestation_2) # will have one attester signing it double + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + @with_all_phases @spec_state_test def test_unsorted_att_1(spec, state): diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py index bfd992ffa2..526aba277e 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py @@ -17,24 +17,80 @@ def mock_deposit(spec, state, index): @with_all_phases @spec_state_test -def test_activation(spec, state): +def test_add_to_activation_queue(spec, state): + # move past first two irregular epochs wrt finality + next_epoch(spec, state) + next_epoch(spec, state) + + index = 0 + mock_deposit(spec, state, index) + + yield from run_process_registry_updates(spec, state) + + # validator moved into queue + assert state.validators[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH + assert state.validators[index].activation_epoch == spec.FAR_FUTURE_EPOCH + assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) + + +@with_all_phases +@spec_state_test +def test_activation_queue_to_activated_if_finalized(spec, state): + # move past first two irregular epochs wrt finality + next_epoch(spec, state) + next_epoch(spec, state) + index = 0 mock_deposit(spec, state, index) - for _ in range(spec.MAX_SEED_LOOKAHEAD + 1): - next_epoch(spec, state) + # mock validator as having been in queue since latest finalized + state.finalized_checkpoint.epoch = spec.get_current_epoch(state) - 1 + state.validators[index].activation_eligibility_epoch = state.finalized_checkpoint.epoch + + assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) yield from run_process_registry_updates(spec, state) + # validator activated for future epoch assert state.validators[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH assert state.validators[index].activation_epoch != spec.FAR_FUTURE_EPOCH - assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) + assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) + assert spec.is_active_validator( + state.validators[index], + spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) + ) + + +@with_all_phases +@spec_state_test +def test_activation_queue_no_activation_no_finality(spec, state): + # move past first two irregular epochs wrt finality + next_epoch(spec, state) + next_epoch(spec, state) + + index = 0 + mock_deposit(spec, state, index) + + # mock validator as having been in queue only after latest finalized + state.finalized_checkpoint.epoch = spec.get_current_epoch(state) - 1 + state.validators[index].activation_eligibility_epoch = state.finalized_checkpoint.epoch + 1 + + assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) + + yield from run_process_registry_updates(spec, state) + + # validator not activated + assert state.validators[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH + assert 
state.validators[index].activation_epoch == spec.FAR_FUTURE_EPOCH @with_all_phases @spec_state_test def test_activation_queue_sorting(spec, state): - mock_activations = 10 + churn_limit = spec.get_validator_churn_limit(state) + + # try to activate more than the per-epoch churn linmit + mock_activations = churn_limit * 2 epoch = spec.get_current_epoch(state) for i in range(mock_activations): @@ -44,9 +100,9 @@ def test_activation_queue_sorting(spec, state): # give the last priority over the others state.validators[mock_activations - 1].activation_eligibility_epoch = epoch - # make sure we are hitting the churn - churn_limit = spec.get_validator_churn_limit(state) - assert mock_activations > churn_limit + # move state forward and finalize to allow for activations + state.slot += spec.SLOTS_PER_EPOCH * 3 + state.finalized_checkpoint.epoch = epoch + 1 yield from run_process_registry_updates(spec, state) @@ -63,6 +119,38 @@ def test_activation_queue_sorting(spec, state): assert state.validators[churn_limit - 2].activation_epoch != spec.FAR_FUTURE_EPOCH +@with_all_phases +@spec_state_test +def test_activation_queue_efficiency(spec, state): + churn_limit = spec.get_validator_churn_limit(state) + mock_activations = churn_limit * 2 + + epoch = spec.get_current_epoch(state) + for i in range(mock_activations): + mock_deposit(spec, state, i) + state.validators[i].activation_eligibility_epoch = epoch + 1 + + # move state forward and finalize to allow for activations + state.slot += spec.SLOTS_PER_EPOCH * 3 + state.finalized_checkpoint.epoch = epoch + 1 + + # Run first registry update. Do not yield test vectors + for _ in run_process_registry_updates(spec, state): + pass + + # Half should churn in first run of registry update + for i in range(mock_activations): + if i < mock_activations // 2: + assert state.validators[i].activation_epoch < spec.FAR_FUTURE_EPOCH + else: + assert state.validators[i].activation_epoch == spec.FAR_FUTURE_EPOCH + + # Second half should churn in second run of registry update + yield from run_process_registry_updates(spec, state) + for i in range(mock_activations): + assert state.validators[i].activation_epoch < spec.FAR_FUTURE_EPOCH + + @with_all_phases @spec_state_test def test_ejection(spec, state): @@ -73,13 +161,87 @@ def test_ejection(spec, state): # Mock an ejection state.validators[index].effective_balance = spec.EJECTION_BALANCE - for _ in range(spec.MAX_SEED_LOOKAHEAD + 1): - next_epoch(spec, state) - yield from run_process_registry_updates(spec, state) assert state.validators[index].exit_epoch != spec.FAR_FUTURE_EPOCH + assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) assert not spec.is_active_validator( state.validators[index], - spec.get_current_epoch(state), + spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) + ) + + +@with_all_phases +@spec_state_test +def test_ejection_past_churn_limit(spec, state): + churn_limit = spec.get_validator_churn_limit(state) + + # try to eject more than per-epoch churn limit + mock_ejections = churn_limit * 3 + + for i in range(mock_ejections): + state.validators[i].effective_balance = spec.EJECTION_BALANCE + + expected_ejection_epoch = spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) + + yield from run_process_registry_updates(spec, state) + + for i in range(mock_ejections): + # first third ejected in normal speed + if i < mock_ejections // 3: + assert state.validators[i].exit_epoch == expected_ejection_epoch + # second thirdgets delayed by 1 epoch + elif 
mock_ejections // 3 <= i < mock_ejections * 2 // 3: + assert state.validators[i].exit_epoch == expected_ejection_epoch + 1 + # second thirdgets delayed by 2 epochs + else: + assert state.validators[i].exit_epoch == expected_ejection_epoch + 2 + + +@with_all_phases +@spec_state_test +def test_activation_queue_activation_and_ejection(spec, state): + # move past first two irregular epochs wrt finality + next_epoch(spec, state) + next_epoch(spec, state) + + # ready for entrance into activation queue + activation_queue_index = 0 + mock_deposit(spec, state, activation_queue_index) + + # ready for activation + activation_index = 1 + mock_deposit(spec, state, activation_index) + state.finalized_checkpoint.epoch = spec.get_current_epoch(state) - 1 + state.validators[activation_index].activation_eligibility_epoch = state.finalized_checkpoint.epoch + + # ready for ejection + ejection_index = 2 + state.validators[ejection_index].effective_balance = spec.EJECTION_BALANCE + + yield from run_process_registry_updates(spec, state) + + # validator moved into activation queue + validator = state.validators[activation_queue_index] + assert validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH + assert validator.activation_epoch == spec.FAR_FUTURE_EPOCH + assert not spec.is_active_validator(validator, spec.get_current_epoch(state)) + + # validator activated for future epoch + validator = state.validators[activation_index] + assert validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH + assert validator.activation_epoch != spec.FAR_FUTURE_EPOCH + assert not spec.is_active_validator(validator, spec.get_current_epoch(state)) + assert spec.is_active_validator( + validator, + spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) + ) + + # validator ejected for future epoch + validator = state.validators[ejection_index] + assert validator.exit_epoch != spec.FAR_FUTURE_EPOCH + assert spec.is_active_validator(validator, spec.get_current_epoch(state)) + assert not spec.is_active_validator( + validator, + spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) ) diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py index 41316e92d2..c2f980ba07 100644 --- a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py +++ b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py @@ -3,7 +3,7 @@ from eth2spec.utils.ssz.ssz_impl import hash_tree_root from eth2spec.utils.bls import bls_sign -from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_block +from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_block, next_slot from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block, \ transition_unsigned_block from eth2spec.test.helpers.keys import privkeys, pubkeys @@ -253,6 +253,58 @@ def test_attester_slashing(spec, state): ) +@with_all_phases +@spec_state_test +def test_proposer_after_inactive_index(spec, state): + # disable some low validator index to check after for + inactive_index = 10 + state.validators[inactive_index].exit_epoch = spec.get_current_epoch(state) + + # skip forward, get brand new proposers + state.slot = spec.SLOTS_PER_EPOCH * 2 + block = build_empty_block_for_next_slot(spec, state) + state_transition_and_sign_block(spec, state, block) + + while True: + next_slot(spec, state) + proposer_index = spec.get_beacon_proposer_index(state) + if proposer_index > inactive_index: + # found a proposer that has a higher index 
than a disabled validator + yield 'pre', state + # test if the proposer can be recognized correctly after the inactive validator + signed_block = state_transition_and_sign_block(spec, state, build_empty_block(spec, state)) + yield 'blocks', [signed_block] + yield 'post', state + break + + +@with_all_phases +@spec_state_test +def test_high_proposer_index(spec, state): + # disable a good amount of validators to make the active count lower, for a faster test + current_epoch = spec.get_current_epoch(state) + for i in range(len(state.validators) // 3): + state.validators[i].exit_epoch = current_epoch + + # skip forward, get brand new proposers + state.slot = spec.SLOTS_PER_EPOCH * 2 + block = build_empty_block_for_next_slot(spec, state) + state_transition_and_sign_block(spec, state, block) + + active_count = len(spec.get_active_validator_indices(state, current_epoch)) + while True: + next_slot(spec, state) + proposer_index = spec.get_beacon_proposer_index(state) + if proposer_index >= active_count: + # found a proposer that has a higher index than the active validator count + yield 'pre', state + # test if the proposer can be recognized correctly, even while it has a high index. + signed_block = state_transition_and_sign_block(spec, state, build_empty_block(spec, state)) + yield 'blocks', [signed_block] + yield 'post', state + break + + @with_all_phases @spec_state_test def test_expected_deposit_in_block(spec, state): diff --git a/test_libs/pyspec/requirements-testing.txt b/test_libs/pyspec/requirements-testing.txt index b5229ae20f..e8ecd12a66 100644 --- a/test_libs/pyspec/requirements-testing.txt +++ b/test_libs/pyspec/requirements-testing.txt @@ -2,6 +2,6 @@ pytest>=4.4 ../config_helpers flake8==3.7.7 -mypy==0.701 +mypy==0.750 pytest-cov pytest-xdist
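The registry-update tests in the diff above assert that ejections past the per-epoch churn limit spill over into later epochs: the first third of ejected validators exits at the expected epoch, the next third one epoch later, the last third two epochs later. A hypothetical sketch of that arithmetic follows; it is illustrative only, not spec or test code, and `churn_limit` / `base_exit_epoch` stand in for `get_validator_churn_limit(state)` and `compute_activation_exit_epoch(get_current_epoch(state))`.

```python
# Hypothetical illustration (not spec code): how a per-epoch churn limit
# staggers validator exits, matching the assertions in
# test_ejection_past_churn_limit above.
def expected_exit_epoch(i, churn_limit, base_exit_epoch):
    # the first `churn_limit` ejections exit at the base epoch,
    # the next `churn_limit` one epoch later, and so on
    return base_exit_epoch + i // churn_limit


if __name__ == "__main__":
    # with churn_limit=4 and a base exit epoch of 5, twelve ejections exit at
    # epochs [5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7]
    print([expected_exit_epoch(i, 4, 5) for i in range(12)])
```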
magenta__magenta-785
numpy dependency missing? magenta/models/sketch_rnn/utils.py has ```import numpy as np```, but magenta/tools/pip/setup.py doesn't list it as a dependency.
[ { "content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'IPython',\n 'Pillow >= 3.4.2',\n 'bokeh >= 0.12.0',\n 'futures',\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido == 1.2.6',\n 'pandas >= 0.18.1',\n 'pretty_midi >= 0.2.6',\n 'python-rtmidi',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.1.0',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.interfaces.midi.midi_clock',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.nsynth.wavenet.nsynth_generate',\n 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',\n 'magenta.models.performance_rnn.performance_rnn_create_dataset',\n 'magenta.models.performance_rnn.performance_rnn_generate',\n 'magenta.models.performance_rnn.performance_rnn_train',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.models.sketch_rnn.sketch_rnn_train',\n 'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n description='Use machine learning to create art and music',\n long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: 
OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n", "path": "magenta/tools/pip/setup.py" } ]
[ { "content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'IPython',\n 'Pillow >= 3.4.2',\n 'bokeh >= 0.12.0',\n 'futures',\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido == 1.2.6',\n 'numpy >= 1.11.0',\n 'pandas >= 0.18.1',\n 'pretty_midi >= 0.2.6',\n 'python-rtmidi',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.1.0',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.interfaces.midi.midi_clock',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.nsynth.wavenet.nsynth_generate',\n 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',\n 'magenta.models.performance_rnn.performance_rnn_create_dataset',\n 'magenta.models.performance_rnn.performance_rnn_generate',\n 'magenta.models.performance_rnn.performance_rnn_train',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.models.sketch_rnn.sketch_rnn_train',\n 'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n description='Use machine learning to create art and music',\n long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: 
Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n", "path": "magenta/tools/pip/setup.py" } ]
diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py index b37e9c8cce..8a1f5e55cb 100644 --- a/magenta/tools/pip/setup.py +++ b/magenta/tools/pip/setup.py @@ -31,6 +31,7 @@ 'intervaltree >= 2.1.0', 'matplotlib >= 1.5.3', 'mido == 1.2.6', + 'numpy >= 1.11.0', 'pandas >= 0.18.1', 'pretty_midi >= 0.2.6', 'python-rtmidi',
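The magenta patch above is the usual remedy for a missing runtime dependency: declare it in setup.py's install_requires so pip installs it alongside the package. Below is a trimmed-down, hypothetical sketch of that pattern; the package name is a placeholder, and only the numpy pin is taken from the record above.

```python
# Minimal, hypothetical setup.py sketch illustrating the fix pattern:
# anything the package imports at runtime (here numpy, imported by
# sketch_rnn/utils.py) has to appear in install_requires.
from setuptools import find_packages, setup

setup(
    name='example-package',       # placeholder, not the real package name
    version='0.1.0',
    packages=find_packages(),
    install_requires=[
        'numpy >= 1.11.0',        # the pin added by the patch above
    ],
)
```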
conda__conda-2772
conda update conda doesn't get latest conda-env It's annoying we even have this problem, but... ``` root@default:~ # conda update conda Fetching package metadata: ...... .Solving package specifications: ......... Package plan for installation in environment /usr/local: The following packages will be downloaded: package | build ---------------------------|----------------- conda-env-2.5.0 | py27_0 28 KB conda-4.1.2 | py27_0 198 KB ------------------------------------------------------------ Total: 226 KB The following NEW packages will be INSTALLED: ruamel_yaml: 0.11.7-py27_0 The following packages will be UPDATED: conda: 4.0.5-py27_0 --> 4.1.2-py27_0 conda-env: 2.4.5-py27_0 --> 2.5.0-py27_0 Proceed ([y]/n)? y Fetching packages ... conda-env-2.5. 100% |#########################################################################################| Time: 0:00:00 587.12 kB/s conda-4.1.2-py 100% |#########################################################################################| Time: 0:00:00 994.90 kB/s Extracting packages ... [ COMPLETE ]|############################################################################################################| 100% Unlinking packages ... [ COMPLETE ]|############################################################################################################| 100% Linking packages ... [ COMPLETE ]|############################################################################################################| 100% root@default:~ # conda update conda-env Fetching package metadata ......... Solving package specifications: .......... Package plan for installation in environment /usr/local: The following packages will be downloaded: package | build ---------------------------|----------------- conda-env-2.5.1 | py27_0 26 KB The following packages will be UPDATED: conda-env: 2.5.0-py27_0 --> 2.5.1-py27_0 Proceed ([y]/n)? y Fetching packages ... conda-env-2.5. 100% |#########################################################################################| Time: 0:00:00 569.65 kB/s Extracting packages ... [ COMPLETE ]|############################################################################################################| 100% Unlinking packages ... [ COMPLETE ]|############################################################################################################| 100% Linking packages ... [ COMPLETE ]|############################################################################################################| 100% ```
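The fix, visible in the patched plan.py later in this record, is to have the solver consider conda-env whenever conda itself is auto-updated in the root environment, so both move forward together. A stripped-down, hypothetical sketch of that behaviour is shown below; the function name and signature are illustrative, not conda's real API.

```python
# Hypothetical sketch of the auto-update behaviour the patch introduces:
# when resolving an install/update for the root prefix with auto-update
# enabled, append both 'conda' and 'conda-env' to the solver specs, so
# `conda update conda` also pulls in the latest conda-env.
def add_auto_update_specs(specs, is_root_prefix, auto_update_conda):
    if auto_update_conda and is_root_prefix:
        specs.append('conda')
        specs.append('conda-env')
    return specs


if __name__ == "__main__":
    # e.g. a plain `conda update anaconda` in the root environment
    print(add_auto_update_specs(['anaconda'], True, True))
    # -> ['anaconda', 'conda', 'conda-env']
```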
[ { "content": "\"\"\"\nHandle the planning of installs and their execution.\n\nNOTE:\n conda.install uses canonical package names in its interface functions,\n whereas conda.resolve uses package filenames, as those are used as index\n keys. We try to keep fixes to this \"impedance mismatch\" local to this\n module.\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport os\nimport sys\nfrom collections import defaultdict\nfrom logging import getLogger\nfrom os.path import abspath, basename, dirname, join, exists\n\nfrom . import instructions as inst\nfrom .config import (always_copy as config_always_copy, channel_priority,\n show_channel_urls as config_show_channel_urls,\n root_dir, allow_softlinks, default_python, auto_update_conda,\n track_features, foreign, url_channel, canonical_channel_name)\nfrom .exceptions import CondaException\nfrom .history import History\nfrom .install import (dist2quad, LINK_HARD, link_name_map, name_dist, is_fetched,\n is_extracted, is_linked, find_new_location, dist2filename, LINK_COPY,\n LINK_SOFT, try_hard_link, rm_rf)\nfrom .resolve import MatchSpec, Resolve, Package\nfrom .utils import md5_file, human_bytes\n\n# For backwards compatibility\n\nlog = getLogger(__name__)\n\ndef print_dists(dists_extras):\n fmt = \" %-27s|%17s\"\n print(fmt % ('package', 'build'))\n print(fmt % ('-' * 27, '-' * 17))\n for dist, extra in dists_extras:\n dist = dist2quad(dist)\n line = fmt % (dist[0]+'-'+dist[1], dist[2])\n if extra:\n line += extra\n print(line)\n\n\ndef display_actions(actions, index, show_channel_urls=None):\n if show_channel_urls is None:\n show_channel_urls = config_show_channel_urls\n\n def channel_str(rec):\n if 'schannel' in rec:\n return rec['schannel']\n if 'url' in rec:\n return url_channel(rec['url'])[1]\n if 'channel' in rec:\n return canonical_channel_name(rec['channel'])\n return '<unknown>'\n\n def channel_filt(s):\n if show_channel_urls is False:\n return ''\n if show_channel_urls is None and s == 'defaults':\n return ''\n return s\n\n if actions.get(inst.FETCH):\n print(\"\\nThe following packages will be downloaded:\\n\")\n\n disp_lst = []\n for dist in actions[inst.FETCH]:\n info = index[dist + '.tar.bz2']\n extra = '%15s' % human_bytes(info['size'])\n schannel = channel_filt(channel_str(info))\n if schannel:\n extra += ' ' + schannel\n disp_lst.append((dist, extra))\n print_dists(disp_lst)\n\n if index and len(actions[inst.FETCH]) > 1:\n num_bytes = sum(index[dist + '.tar.bz2']['size']\n for dist in actions[inst.FETCH])\n print(' ' * 4 + '-' * 60)\n print(\" \" * 43 + \"Total: %14s\" % human_bytes(num_bytes))\n\n # package -> [oldver-oldbuild, newver-newbuild]\n packages = defaultdict(lambda: list(('', '')))\n features = defaultdict(lambda: list(('', '')))\n channels = defaultdict(lambda: list(('', '')))\n records = defaultdict(lambda: list((None, None)))\n linktypes = {}\n\n for arg in actions.get(inst.LINK, []):\n dist, lt, shortcuts = inst.split_linkarg(arg)\n fkey = dist + '.tar.bz2'\n rec = index[fkey]\n pkg = rec['name']\n channels[pkg][1] = channel_str(rec)\n packages[pkg][1] = rec['version'] + '-' + rec['build']\n records[pkg][1] = Package(fkey, rec)\n linktypes[pkg] = lt\n features[pkg][1] = rec.get('features', '')\n for arg in actions.get(inst.UNLINK, []):\n dist, lt, shortcuts = inst.split_linkarg(arg)\n fkey = dist + '.tar.bz2'\n rec = index.get(fkey)\n if rec is None:\n pkg, ver, build, schannel = dist2quad(dist)\n rec = dict(name=pkg, version=ver, build=build, channel=None,\n 
schannel='<unknown>',\n build_number=int(build) if build.isdigit() else 0)\n pkg = rec['name']\n channels[pkg][0] = channel_str(rec)\n packages[pkg][0] = rec['version'] + '-' + rec['build']\n records[pkg][0] = Package(fkey, rec)\n features[pkg][0] = rec.get('features', '')\n\n # Put a minimum length here---. .--For the :\n # v v\n\n new = {p for p in packages if not packages[p][0]}\n removed = {p for p in packages if not packages[p][1]}\n # New packages are actually listed in the left-hand column,\n # so let's move them over there\n for pkg in new:\n for var in (packages, features, channels, records):\n var[pkg] = var[pkg][::-1]\n\n if packages:\n maxpkg = max(len(p) for p in packages) + 1\n maxoldver = max(len(p[0]) for p in packages.values())\n maxnewver = max(len(p[1]) for p in packages.values())\n maxoldfeatures = max(len(p[0]) for p in features.values())\n maxnewfeatures = max(len(p[1]) for p in features.values())\n maxoldchannels = max(len(channel_filt(p[0])) for p in channels.values())\n maxnewchannels = max(len(channel_filt(p[1])) for p in channels.values())\n updated = set()\n downgraded = set()\n channeled = set()\n oldfmt = {}\n newfmt = {}\n for pkg in packages:\n # That's right. I'm using old-style string formatting to generate a\n # string with new-style string formatting.\n oldfmt[pkg] = '{pkg:<%s} {vers[0]:<%s}' % (maxpkg, maxoldver)\n if maxoldchannels:\n oldfmt[pkg] += ' {channels[0]:<%s}' % maxoldchannels\n if features[pkg][0]:\n oldfmt[pkg] += ' [{features[0]:<%s}]' % maxoldfeatures\n\n lt = linktypes.get(pkg, LINK_HARD)\n lt = '' if lt == LINK_HARD else (' (%s)' % link_name_map[lt])\n if pkg in removed or pkg in new:\n oldfmt[pkg] += lt\n continue\n\n newfmt[pkg] = '{vers[1]:<%s}' % maxnewver\n if maxnewchannels:\n newfmt[pkg] += ' {channels[1]:<%s}' % maxnewchannels\n if features[pkg][1]:\n newfmt[pkg] += ' [{features[1]:<%s}]' % maxnewfeatures\n newfmt[pkg] += lt\n\n P0 = records[pkg][0]\n P1 = records[pkg][1]\n pri0 = P0.priority\n pri1 = P1.priority\n if pri0 is None or pri1 is None:\n pri0 = pri1 = 1\n try:\n if str(P1.version) == 'custom':\n newver = str(P0.version) != 'custom'\n oldver = not newver\n else:\n # <= here means that unchanged packages will be put in updated\n newver = P0.norm_version < P1.norm_version\n oldver = P0.norm_version > P1.norm_version\n except TypeError:\n newver = P0.version < P1.version\n oldver = P0.version > P1.version\n oldbld = P0.build_number > P1.build_number\n if channel_priority and pri1 < pri0 and (oldver or not newver and oldbld):\n channeled.add(pkg)\n elif newver:\n updated.add(pkg)\n elif pri1 < pri0 and (oldver or not newver and oldbld):\n channeled.add(pkg)\n elif oldver:\n downgraded.add(pkg)\n elif not oldbld:\n updated.add(pkg)\n else:\n downgraded.add(pkg)\n\n arrow = ' --> '\n lead = ' ' * 4\n\n def format(s, pkg):\n chans = [channel_filt(c) for c in channels[pkg]]\n return lead + s.format(pkg=pkg + ':', vers=packages[pkg],\n channels=chans, features=features[pkg])\n\n if new:\n print(\"\\nThe following NEW packages will be INSTALLED:\\n\")\n for pkg in sorted(new):\n # New packages have been moved to the \"old\" column for display\n print(format(oldfmt[pkg], pkg))\n\n if removed:\n print(\"\\nThe following packages will be REMOVED:\\n\")\n for pkg in sorted(removed):\n print(format(oldfmt[pkg], pkg))\n\n if updated:\n print(\"\\nThe following packages will be UPDATED:\\n\")\n for pkg in sorted(updated):\n print(format(oldfmt[pkg] + arrow + newfmt[pkg], pkg))\n\n if channeled:\n print(\"\\nThe following packages will 
be SUPERCEDED by a higher-priority channel:\\n\")\n for pkg in sorted(channeled):\n print(format(oldfmt[pkg] + arrow + newfmt[pkg], pkg))\n\n if downgraded:\n print(\"\\nThe following packages will be DOWNGRADED due to dependency conflicts:\\n\")\n for pkg in sorted(downgraded):\n print(format(oldfmt[pkg] + arrow + newfmt[pkg], pkg))\n\n print()\n\n\ndef nothing_to_do(actions):\n for op in inst.action_codes:\n if actions.get(op):\n return False\n return True\n\n\ndef add_unlink(actions, dist):\n if inst.UNLINK not in actions:\n actions[inst.UNLINK] = []\n actions[inst.UNLINK].append(dist)\n\n\ndef plan_from_actions(actions):\n if 'op_order' in actions and actions['op_order']:\n op_order = actions['op_order']\n else:\n op_order = inst.action_codes\n\n assert inst.PREFIX in actions and actions[inst.PREFIX]\n res = [('PREFIX', '%s' % actions[inst.PREFIX])]\n\n if sys.platform == 'win32':\n # Always link/unlink menuinst first on windows in case a subsequent\n # package tries to import it to create/remove a shortcut\n\n for op in (inst.UNLINK, inst.FETCH, inst.EXTRACT, inst.LINK):\n if op in actions:\n pkgs = []\n for pkg in actions[op]:\n if 'menuinst' in pkg:\n res.append((op, pkg))\n else:\n pkgs.append(pkg)\n actions[op] = pkgs\n\n for op in op_order:\n if op not in actions:\n continue\n if not actions[op]:\n continue\n if '_' not in op:\n res.append((inst.PRINT, '%sing packages ...' % op.capitalize()))\n elif op.startswith('RM_'):\n res.append((inst.PRINT, 'Pruning %s packages from the cache ...' % op[3:].lower()))\n if op in inst.progress_cmds:\n res.append((inst.PROGRESS, '%d' % len(actions[op])))\n for arg in actions[op]:\n res.append((op, arg))\n\n return res\n\n\n# force_linked_actions has now been folded into this function, and is enabled by\n# supplying an index and setting force=True\ndef ensure_linked_actions(dists, prefix, index=None, force=False,\n always_copy=False, shortcuts=False):\n actions = defaultdict(list)\n actions[inst.PREFIX] = prefix\n actions['op_order'] = (inst.RM_FETCHED, inst.FETCH, inst.RM_EXTRACTED,\n inst.EXTRACT, inst.UNLINK, inst.LINK)\n for dist in dists:\n fetched_in = is_fetched(dist)\n extracted_in = is_extracted(dist)\n\n if fetched_in and index is not None:\n # Test the MD5, and possibly re-fetch\n fn = dist + '.tar.bz2'\n try:\n if md5_file(fetched_in) != index[fn]['md5']:\n # RM_FETCHED now removes the extracted data too\n actions[inst.RM_FETCHED].append(dist)\n # Re-fetch, re-extract, re-link\n fetched_in = extracted_in = None\n force = True\n except KeyError:\n sys.stderr.write('Warning: cannot lookup MD5 of: %s' % fn)\n\n if not force and is_linked(prefix, dist):\n continue\n\n if extracted_in and force:\n # Always re-extract in the force case\n actions[inst.RM_EXTRACTED].append(dist)\n extracted_in = None\n\n # Otherwise we need to extract, and possibly fetch\n if not extracted_in and not fetched_in:\n # If there is a cache conflict, clean it up\n fetched_in, conflict = find_new_location(dist)\n fetched_in = join(fetched_in, dist2filename(dist))\n if conflict is not None:\n actions[inst.RM_FETCHED].append(conflict)\n actions[inst.FETCH].append(dist)\n\n if not extracted_in:\n actions[inst.EXTRACT].append(dist)\n\n fetched_dist = extracted_in or fetched_in[:-8]\n fetched_dir = dirname(fetched_dist)\n\n try:\n # Determine what kind of linking is necessary\n if not extracted_in:\n # If not already extracted, create some dummy\n # data to test with\n rm_rf(fetched_dist)\n ppath = join(fetched_dist, 'info')\n os.makedirs(ppath)\n index_json = 
join(ppath, 'index.json')\n with open(index_json, 'w'):\n pass\n if config_always_copy or always_copy:\n lt = LINK_COPY\n elif try_hard_link(fetched_dir, prefix, dist):\n lt = LINK_HARD\n elif allow_softlinks and sys.platform != 'win32':\n lt = LINK_SOFT\n else:\n lt = LINK_COPY\n actions[inst.LINK].append('%s %d %s' % (dist, lt, shortcuts))\n\n except (OSError, IOError):\n actions[inst.LINK].append('%s %d %s' % (dist, LINK_COPY, shortcuts))\n finally:\n if not extracted_in:\n # Remove the dummy data\n try:\n rm_rf(fetched_dist)\n except (OSError, IOError):\n pass\n\n return actions\n\n# -------------------------------------------------------------------\n\n\ndef is_root_prefix(prefix):\n return abspath(prefix) == abspath(root_dir)\n\n\ndef add_defaults_to_specs(r, linked, specs, update=False):\n # TODO: This should use the pinning mechanism. But don't change the API:\n # cas uses it.\n if r.explicit(specs):\n return\n log.debug('H0 specs=%r' % specs)\n linked = [d if d.endswith('.tar.bz2') else d + '.tar.bz2' for d in linked]\n names_linked = {r.index[fn]['name']: fn for fn in linked if fn in r.index}\n mspecs = list(map(MatchSpec, specs))\n\n for name, def_ver in [('python', default_python),\n # Default version required, but only used for Python\n ('lua', None)]:\n if any(s.name == name and not s.is_simple() for s in mspecs):\n # if any of the specifications mention the Python/Numpy version,\n # we don't need to add the default spec\n log.debug('H1 %s' % name)\n continue\n\n depends_on = {s for s in mspecs if r.depends_on(s, name)}\n any_depends_on = bool(depends_on)\n log.debug('H2 %s %s' % (name, any_depends_on))\n\n if not any_depends_on:\n # if nothing depends on Python/Numpy AND the Python/Numpy is not\n # specified, we don't need to add the default spec\n log.debug('H2A %s' % name)\n continue\n\n if any(s.is_exact() for s in depends_on):\n # If something depends on Python/Numpy, but the spec is very\n # explicit, we also don't need to add the default spec\n log.debug('H2B %s' % name)\n continue\n\n if name in names_linked:\n # if Python/Numpy is already linked, we add that instead of the\n # default\n log.debug('H3 %s' % name)\n fkey = names_linked[name]\n info = r.index[fkey]\n ver = '.'.join(info['version'].split('.', 2)[:2])\n spec = '%s %s* (target=%s)' % (info['name'], ver, fkey)\n specs.append(spec)\n continue\n\n if name == 'python' and def_ver.startswith('3.'):\n # Don't include Python 3 in the specs if this is the Python 3\n # version of conda.\n continue\n\n if def_ver is not None:\n specs.append('%s %s*' % (name, def_ver))\n log.debug('HF specs=%r' % specs)\n\n\ndef get_pinned_specs(prefix):\n pinfile = join(prefix, 'conda-meta', 'pinned')\n if not exists(pinfile):\n return []\n with open(pinfile) as f:\n return [i for i in f.read().strip().splitlines() if i and not i.strip().startswith('#')]\n\ndef install_actions(prefix, index, specs, force=False, only_names=None, always_copy=False,\n pinned=True, minimal_hint=False, update_deps=True, prune=False,\n shortcuts=False):\n r = Resolve(index)\n linked = r.installed\n\n if auto_update_conda and is_root_prefix(prefix):\n specs.append('conda')\n\n if pinned:\n pinned_specs = get_pinned_specs(prefix)\n log.debug(\"Pinned specs=%s\" % pinned_specs)\n specs += pinned_specs\n\n must_have = {}\n if track_features:\n specs.extend(x + '@' for x in track_features)\n\n pkgs = r.install(specs, linked, update_deps=update_deps)\n\n for fn in pkgs:\n dist = fn[:-8]\n name = name_dist(dist)\n if not name or only_names and name not in 
only_names:\n continue\n must_have[name] = dist\n\n if is_root_prefix(prefix):\n for name in foreign:\n if name in must_have:\n del must_have[name]\n elif basename(prefix).startswith('_'):\n # anything (including conda) can be installed into environments\n # starting with '_', mainly to allow conda-build to build conda\n pass\n else:\n # disallow conda from being installed into all other environments\n if 'conda' in must_have or 'conda-env' in must_have:\n sys.exit(\"Error: 'conda' can only be installed into the \"\n \"root environment\")\n\n smh = r.dependency_sort(must_have)\n\n actions = ensure_linked_actions(\n smh, prefix,\n index=index if force else None,\n force=force, always_copy=always_copy,\n shortcuts=shortcuts)\n\n if actions[inst.LINK]:\n actions[inst.SYMLINK_CONDA] = [root_dir]\n\n for fkey in sorted(linked):\n dist = fkey[:-8]\n name = name_dist(dist)\n replace_existing = name in must_have and dist != must_have[name]\n prune_it = prune and dist not in smh\n if replace_existing or prune_it:\n add_unlink(actions, dist)\n\n return actions\n\n\ndef remove_actions(prefix, specs, index, force=False, pinned=True):\n r = Resolve(index)\n linked = r.installed\n\n if force:\n mss = list(map(MatchSpec, specs))\n nlinked = {r.package_name(fn): fn[:-8]\n for fn in linked\n if not any(r.match(ms, fn) for ms in mss)}\n else:\n add_defaults_to_specs(r, linked, specs, update=True)\n nlinked = {r.package_name(fn): fn[:-8] for fn in r.remove(specs, linked)}\n\n if pinned:\n pinned_specs = get_pinned_specs(prefix)\n log.debug(\"Pinned specs=%s\" % pinned_specs)\n\n linked = {r.package_name(fn): fn[:-8] for fn in linked}\n\n actions = ensure_linked_actions(r.dependency_sort(nlinked), prefix)\n for old_fn in reversed(r.dependency_sort(linked)):\n dist = old_fn + '.tar.bz2'\n name = r.package_name(dist)\n if old_fn == nlinked.get(name, ''):\n continue\n if pinned and any(r.match(ms, dist) for ms in pinned_specs):\n msg = \"Cannot remove %s becaue it is pinned. 
Use --no-pin to override.\"\n raise RuntimeError(msg % dist)\n if name == 'conda' and name not in nlinked:\n if any(s.split(' ', 1)[0] == 'conda' for s in specs):\n sys.exit(\"Error: 'conda' cannot be removed from the root environment\")\n else:\n msg = (\"Error: this 'remove' command cannot be executed because it\\n\"\n \"would require removing 'conda' dependencies\")\n sys.exit(msg)\n add_unlink(actions, old_fn)\n\n return actions\n\n\ndef remove_features_actions(prefix, index, features):\n r = Resolve(index)\n linked = r.installed\n\n actions = defaultdict(list)\n actions[inst.PREFIX] = prefix\n _linked = [d + '.tar.bz2' for d in linked]\n to_link = []\n for dist in sorted(linked):\n fn = dist + '.tar.bz2'\n if fn not in index:\n continue\n if r.track_features(fn).intersection(features):\n add_unlink(actions, dist)\n if r.features(fn).intersection(features):\n add_unlink(actions, dist)\n subst = r.find_substitute(_linked, features, fn)\n if subst:\n to_link.append(subst[:-8])\n\n if to_link:\n actions.update(ensure_linked_actions(to_link, prefix))\n return actions\n\n\ndef revert_actions(prefix, revision=-1):\n h = History(prefix)\n h.update()\n try:\n state = h.get_state(revision)\n except IndexError:\n sys.exit(\"Error: no such revision: %d\" % revision)\n\n curr = h.get_state()\n if state == curr:\n return {}\n\n actions = ensure_linked_actions(state, prefix)\n for dist in curr - state:\n add_unlink(actions, dist)\n\n return actions\n\n# ---------------------------- EXECUTION --------------------------\n\n\ndef execute_actions(actions, index=None, verbose=False):\n plan = plan_from_actions(actions)\n with History(actions[inst.PREFIX]):\n inst.execute_instructions(plan, index, verbose)\n\n\ndef update_old_plan(old_plan):\n \"\"\"\n Update an old plan object to work with\n `conda.instructions.execute_instructions`\n \"\"\"\n plan = []\n for line in old_plan:\n if line.startswith('#'):\n continue\n if ' ' not in line:\n raise CondaException(\n \"The instruction '%s' takes at least one argument\" % line\n )\n\n instruction, arg = line.split(' ', 1)\n plan.append((instruction, arg))\n return plan\n\n\ndef execute_plan(old_plan, index=None, verbose=False):\n \"\"\"\n Deprecated: This should `conda.instructions.execute_instructions` instead\n \"\"\"\n plan = update_old_plan(old_plan)\n inst.execute_instructions(plan, index, verbose)\n\n\nif __name__ == '__main__':\n # for testing new revert_actions() only\n from pprint import pprint\n pprint(dict(revert_actions(sys.prefix, int(sys.argv[1]))))\n", "path": "conda/plan.py" } ]
[ { "content": "\"\"\"\nHandle the planning of installs and their execution.\n\nNOTE:\n conda.install uses canonical package names in its interface functions,\n whereas conda.resolve uses package filenames, as those are used as index\n keys. We try to keep fixes to this \"impedance mismatch\" local to this\n module.\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport os\nimport sys\nfrom collections import defaultdict\nfrom logging import getLogger\nfrom os.path import abspath, basename, dirname, join, exists\n\nfrom . import instructions as inst\nfrom .config import (always_copy as config_always_copy, channel_priority,\n show_channel_urls as config_show_channel_urls,\n root_dir, allow_softlinks, default_python, auto_update_conda,\n track_features, foreign, url_channel, canonical_channel_name)\nfrom .exceptions import CondaException\nfrom .history import History\nfrom .install import (dist2quad, LINK_HARD, link_name_map, name_dist, is_fetched,\n is_extracted, is_linked, find_new_location, dist2filename, LINK_COPY,\n LINK_SOFT, try_hard_link, rm_rf)\nfrom .resolve import MatchSpec, Resolve, Package\nfrom .utils import md5_file, human_bytes\n\n# For backwards compatibility\n\nlog = getLogger(__name__)\n\ndef print_dists(dists_extras):\n fmt = \" %-27s|%17s\"\n print(fmt % ('package', 'build'))\n print(fmt % ('-' * 27, '-' * 17))\n for dist, extra in dists_extras:\n dist = dist2quad(dist)\n line = fmt % (dist[0]+'-'+dist[1], dist[2])\n if extra:\n line += extra\n print(line)\n\n\ndef display_actions(actions, index, show_channel_urls=None):\n if show_channel_urls is None:\n show_channel_urls = config_show_channel_urls\n\n def channel_str(rec):\n if 'schannel' in rec:\n return rec['schannel']\n if 'url' in rec:\n return url_channel(rec['url'])[1]\n if 'channel' in rec:\n return canonical_channel_name(rec['channel'])\n return '<unknown>'\n\n def channel_filt(s):\n if show_channel_urls is False:\n return ''\n if show_channel_urls is None and s == 'defaults':\n return ''\n return s\n\n if actions.get(inst.FETCH):\n print(\"\\nThe following packages will be downloaded:\\n\")\n\n disp_lst = []\n for dist in actions[inst.FETCH]:\n info = index[dist + '.tar.bz2']\n extra = '%15s' % human_bytes(info['size'])\n schannel = channel_filt(channel_str(info))\n if schannel:\n extra += ' ' + schannel\n disp_lst.append((dist, extra))\n print_dists(disp_lst)\n\n if index and len(actions[inst.FETCH]) > 1:\n num_bytes = sum(index[dist + '.tar.bz2']['size']\n for dist in actions[inst.FETCH])\n print(' ' * 4 + '-' * 60)\n print(\" \" * 43 + \"Total: %14s\" % human_bytes(num_bytes))\n\n # package -> [oldver-oldbuild, newver-newbuild]\n packages = defaultdict(lambda: list(('', '')))\n features = defaultdict(lambda: list(('', '')))\n channels = defaultdict(lambda: list(('', '')))\n records = defaultdict(lambda: list((None, None)))\n linktypes = {}\n\n for arg in actions.get(inst.LINK, []):\n dist, lt, shortcuts = inst.split_linkarg(arg)\n fkey = dist + '.tar.bz2'\n rec = index[fkey]\n pkg = rec['name']\n channels[pkg][1] = channel_str(rec)\n packages[pkg][1] = rec['version'] + '-' + rec['build']\n records[pkg][1] = Package(fkey, rec)\n linktypes[pkg] = lt\n features[pkg][1] = rec.get('features', '')\n for arg in actions.get(inst.UNLINK, []):\n dist, lt, shortcuts = inst.split_linkarg(arg)\n fkey = dist + '.tar.bz2'\n rec = index.get(fkey)\n if rec is None:\n pkg, ver, build, schannel = dist2quad(dist)\n rec = dict(name=pkg, version=ver, build=build, channel=None,\n 
schannel='<unknown>',\n build_number=int(build) if build.isdigit() else 0)\n pkg = rec['name']\n channels[pkg][0] = channel_str(rec)\n packages[pkg][0] = rec['version'] + '-' + rec['build']\n records[pkg][0] = Package(fkey, rec)\n features[pkg][0] = rec.get('features', '')\n\n # Put a minimum length here---. .--For the :\n # v v\n\n new = {p for p in packages if not packages[p][0]}\n removed = {p for p in packages if not packages[p][1]}\n # New packages are actually listed in the left-hand column,\n # so let's move them over there\n for pkg in new:\n for var in (packages, features, channels, records):\n var[pkg] = var[pkg][::-1]\n\n if packages:\n maxpkg = max(len(p) for p in packages) + 1\n maxoldver = max(len(p[0]) for p in packages.values())\n maxnewver = max(len(p[1]) for p in packages.values())\n maxoldfeatures = max(len(p[0]) for p in features.values())\n maxnewfeatures = max(len(p[1]) for p in features.values())\n maxoldchannels = max(len(channel_filt(p[0])) for p in channels.values())\n maxnewchannels = max(len(channel_filt(p[1])) for p in channels.values())\n updated = set()\n downgraded = set()\n channeled = set()\n oldfmt = {}\n newfmt = {}\n for pkg in packages:\n # That's right. I'm using old-style string formatting to generate a\n # string with new-style string formatting.\n oldfmt[pkg] = '{pkg:<%s} {vers[0]:<%s}' % (maxpkg, maxoldver)\n if maxoldchannels:\n oldfmt[pkg] += ' {channels[0]:<%s}' % maxoldchannels\n if features[pkg][0]:\n oldfmt[pkg] += ' [{features[0]:<%s}]' % maxoldfeatures\n\n lt = linktypes.get(pkg, LINK_HARD)\n lt = '' if lt == LINK_HARD else (' (%s)' % link_name_map[lt])\n if pkg in removed or pkg in new:\n oldfmt[pkg] += lt\n continue\n\n newfmt[pkg] = '{vers[1]:<%s}' % maxnewver\n if maxnewchannels:\n newfmt[pkg] += ' {channels[1]:<%s}' % maxnewchannels\n if features[pkg][1]:\n newfmt[pkg] += ' [{features[1]:<%s}]' % maxnewfeatures\n newfmt[pkg] += lt\n\n P0 = records[pkg][0]\n P1 = records[pkg][1]\n pri0 = P0.priority\n pri1 = P1.priority\n if pri0 is None or pri1 is None:\n pri0 = pri1 = 1\n try:\n if str(P1.version) == 'custom':\n newver = str(P0.version) != 'custom'\n oldver = not newver\n else:\n # <= here means that unchanged packages will be put in updated\n newver = P0.norm_version < P1.norm_version\n oldver = P0.norm_version > P1.norm_version\n except TypeError:\n newver = P0.version < P1.version\n oldver = P0.version > P1.version\n oldbld = P0.build_number > P1.build_number\n if channel_priority and pri1 < pri0 and (oldver or not newver and oldbld):\n channeled.add(pkg)\n elif newver:\n updated.add(pkg)\n elif pri1 < pri0 and (oldver or not newver and oldbld):\n channeled.add(pkg)\n elif oldver:\n downgraded.add(pkg)\n elif not oldbld:\n updated.add(pkg)\n else:\n downgraded.add(pkg)\n\n arrow = ' --> '\n lead = ' ' * 4\n\n def format(s, pkg):\n chans = [channel_filt(c) for c in channels[pkg]]\n return lead + s.format(pkg=pkg + ':', vers=packages[pkg],\n channels=chans, features=features[pkg])\n\n if new:\n print(\"\\nThe following NEW packages will be INSTALLED:\\n\")\n for pkg in sorted(new):\n # New packages have been moved to the \"old\" column for display\n print(format(oldfmt[pkg], pkg))\n\n if removed:\n print(\"\\nThe following packages will be REMOVED:\\n\")\n for pkg in sorted(removed):\n print(format(oldfmt[pkg], pkg))\n\n if updated:\n print(\"\\nThe following packages will be UPDATED:\\n\")\n for pkg in sorted(updated):\n print(format(oldfmt[pkg] + arrow + newfmt[pkg], pkg))\n\n if channeled:\n print(\"\\nThe following packages will 
be SUPERCEDED by a higher-priority channel:\\n\")\n for pkg in sorted(channeled):\n print(format(oldfmt[pkg] + arrow + newfmt[pkg], pkg))\n\n if downgraded:\n print(\"\\nThe following packages will be DOWNGRADED due to dependency conflicts:\\n\")\n for pkg in sorted(downgraded):\n print(format(oldfmt[pkg] + arrow + newfmt[pkg], pkg))\n\n print()\n\n\ndef nothing_to_do(actions):\n for op in inst.action_codes:\n if actions.get(op):\n return False\n return True\n\n\ndef add_unlink(actions, dist):\n if inst.UNLINK not in actions:\n actions[inst.UNLINK] = []\n actions[inst.UNLINK].append(dist)\n\n\ndef plan_from_actions(actions):\n if 'op_order' in actions and actions['op_order']:\n op_order = actions['op_order']\n else:\n op_order = inst.action_codes\n\n assert inst.PREFIX in actions and actions[inst.PREFIX]\n res = [('PREFIX', '%s' % actions[inst.PREFIX])]\n\n if sys.platform == 'win32':\n # Always link/unlink menuinst first on windows in case a subsequent\n # package tries to import it to create/remove a shortcut\n\n for op in (inst.UNLINK, inst.FETCH, inst.EXTRACT, inst.LINK):\n if op in actions:\n pkgs = []\n for pkg in actions[op]:\n if 'menuinst' in pkg:\n res.append((op, pkg))\n else:\n pkgs.append(pkg)\n actions[op] = pkgs\n\n for op in op_order:\n if op not in actions:\n continue\n if not actions[op]:\n continue\n if '_' not in op:\n res.append((inst.PRINT, '%sing packages ...' % op.capitalize()))\n elif op.startswith('RM_'):\n res.append((inst.PRINT, 'Pruning %s packages from the cache ...' % op[3:].lower()))\n if op in inst.progress_cmds:\n res.append((inst.PROGRESS, '%d' % len(actions[op])))\n for arg in actions[op]:\n res.append((op, arg))\n\n return res\n\n\n# force_linked_actions has now been folded into this function, and is enabled by\n# supplying an index and setting force=True\ndef ensure_linked_actions(dists, prefix, index=None, force=False,\n always_copy=False, shortcuts=False):\n actions = defaultdict(list)\n actions[inst.PREFIX] = prefix\n actions['op_order'] = (inst.RM_FETCHED, inst.FETCH, inst.RM_EXTRACTED,\n inst.EXTRACT, inst.UNLINK, inst.LINK)\n for dist in dists:\n fetched_in = is_fetched(dist)\n extracted_in = is_extracted(dist)\n\n if fetched_in and index is not None:\n # Test the MD5, and possibly re-fetch\n fn = dist + '.tar.bz2'\n try:\n if md5_file(fetched_in) != index[fn]['md5']:\n # RM_FETCHED now removes the extracted data too\n actions[inst.RM_FETCHED].append(dist)\n # Re-fetch, re-extract, re-link\n fetched_in = extracted_in = None\n force = True\n except KeyError:\n sys.stderr.write('Warning: cannot lookup MD5 of: %s' % fn)\n\n if not force and is_linked(prefix, dist):\n continue\n\n if extracted_in and force:\n # Always re-extract in the force case\n actions[inst.RM_EXTRACTED].append(dist)\n extracted_in = None\n\n # Otherwise we need to extract, and possibly fetch\n if not extracted_in and not fetched_in:\n # If there is a cache conflict, clean it up\n fetched_in, conflict = find_new_location(dist)\n fetched_in = join(fetched_in, dist2filename(dist))\n if conflict is not None:\n actions[inst.RM_FETCHED].append(conflict)\n actions[inst.FETCH].append(dist)\n\n if not extracted_in:\n actions[inst.EXTRACT].append(dist)\n\n fetched_dist = extracted_in or fetched_in[:-8]\n fetched_dir = dirname(fetched_dist)\n\n try:\n # Determine what kind of linking is necessary\n if not extracted_in:\n # If not already extracted, create some dummy\n # data to test with\n rm_rf(fetched_dist)\n ppath = join(fetched_dist, 'info')\n os.makedirs(ppath)\n index_json = 
join(ppath, 'index.json')\n with open(index_json, 'w'):\n pass\n if config_always_copy or always_copy:\n lt = LINK_COPY\n elif try_hard_link(fetched_dir, prefix, dist):\n lt = LINK_HARD\n elif allow_softlinks and sys.platform != 'win32':\n lt = LINK_SOFT\n else:\n lt = LINK_COPY\n actions[inst.LINK].append('%s %d %s' % (dist, lt, shortcuts))\n\n except (OSError, IOError):\n actions[inst.LINK].append('%s %d %s' % (dist, LINK_COPY, shortcuts))\n finally:\n if not extracted_in:\n # Remove the dummy data\n try:\n rm_rf(fetched_dist)\n except (OSError, IOError):\n pass\n\n return actions\n\n# -------------------------------------------------------------------\n\n\ndef is_root_prefix(prefix):\n return abspath(prefix) == abspath(root_dir)\n\n\ndef add_defaults_to_specs(r, linked, specs, update=False):\n # TODO: This should use the pinning mechanism. But don't change the API:\n # cas uses it.\n if r.explicit(specs):\n return\n log.debug('H0 specs=%r' % specs)\n linked = [d if d.endswith('.tar.bz2') else d + '.tar.bz2' for d in linked]\n names_linked = {r.index[fn]['name']: fn for fn in linked if fn in r.index}\n mspecs = list(map(MatchSpec, specs))\n\n for name, def_ver in [('python', default_python),\n # Default version required, but only used for Python\n ('lua', None)]:\n if any(s.name == name and not s.is_simple() for s in mspecs):\n # if any of the specifications mention the Python/Numpy version,\n # we don't need to add the default spec\n log.debug('H1 %s' % name)\n continue\n\n depends_on = {s for s in mspecs if r.depends_on(s, name)}\n any_depends_on = bool(depends_on)\n log.debug('H2 %s %s' % (name, any_depends_on))\n\n if not any_depends_on:\n # if nothing depends on Python/Numpy AND the Python/Numpy is not\n # specified, we don't need to add the default spec\n log.debug('H2A %s' % name)\n continue\n\n if any(s.is_exact() for s in depends_on):\n # If something depends on Python/Numpy, but the spec is very\n # explicit, we also don't need to add the default spec\n log.debug('H2B %s' % name)\n continue\n\n if name in names_linked:\n # if Python/Numpy is already linked, we add that instead of the\n # default\n log.debug('H3 %s' % name)\n fkey = names_linked[name]\n info = r.index[fkey]\n ver = '.'.join(info['version'].split('.', 2)[:2])\n spec = '%s %s* (target=%s)' % (info['name'], ver, fkey)\n specs.append(spec)\n continue\n\n if name == 'python' and def_ver.startswith('3.'):\n # Don't include Python 3 in the specs if this is the Python 3\n # version of conda.\n continue\n\n if def_ver is not None:\n specs.append('%s %s*' % (name, def_ver))\n log.debug('HF specs=%r' % specs)\n\n\ndef get_pinned_specs(prefix):\n pinfile = join(prefix, 'conda-meta', 'pinned')\n if not exists(pinfile):\n return []\n with open(pinfile) as f:\n return [i for i in f.read().strip().splitlines() if i and not i.strip().startswith('#')]\n\ndef install_actions(prefix, index, specs, force=False, only_names=None, always_copy=False,\n pinned=True, minimal_hint=False, update_deps=True, prune=False,\n shortcuts=False):\n r = Resolve(index)\n linked = r.installed\n\n if auto_update_conda and is_root_prefix(prefix):\n specs.append('conda')\n specs.append('conda-env')\n\n if pinned:\n pinned_specs = get_pinned_specs(prefix)\n log.debug(\"Pinned specs=%s\" % pinned_specs)\n specs += pinned_specs\n\n must_have = {}\n if track_features:\n specs.extend(x + '@' for x in track_features)\n\n pkgs = r.install(specs, linked, update_deps=update_deps)\n\n for fn in pkgs:\n dist = fn[:-8]\n name = name_dist(dist)\n if not name or 
only_names and name not in only_names:\n continue\n must_have[name] = dist\n\n if is_root_prefix(prefix):\n for name in foreign:\n if name in must_have:\n del must_have[name]\n elif basename(prefix).startswith('_'):\n # anything (including conda) can be installed into environments\n # starting with '_', mainly to allow conda-build to build conda\n pass\n else:\n # disallow conda from being installed into all other environments\n if 'conda' in must_have or 'conda-env' in must_have:\n sys.exit(\"Error: 'conda' can only be installed into the \"\n \"root environment\")\n\n smh = r.dependency_sort(must_have)\n\n actions = ensure_linked_actions(\n smh, prefix,\n index=index if force else None,\n force=force, always_copy=always_copy,\n shortcuts=shortcuts)\n\n if actions[inst.LINK]:\n actions[inst.SYMLINK_CONDA] = [root_dir]\n\n for fkey in sorted(linked):\n dist = fkey[:-8]\n name = name_dist(dist)\n replace_existing = name in must_have and dist != must_have[name]\n prune_it = prune and dist not in smh\n if replace_existing or prune_it:\n add_unlink(actions, dist)\n\n return actions\n\n\ndef remove_actions(prefix, specs, index, force=False, pinned=True):\n r = Resolve(index)\n linked = r.installed\n\n if force:\n mss = list(map(MatchSpec, specs))\n nlinked = {r.package_name(fn): fn[:-8]\n for fn in linked\n if not any(r.match(ms, fn) for ms in mss)}\n else:\n add_defaults_to_specs(r, linked, specs, update=True)\n nlinked = {r.package_name(fn): fn[:-8] for fn in r.remove(specs, linked)}\n\n if pinned:\n pinned_specs = get_pinned_specs(prefix)\n log.debug(\"Pinned specs=%s\" % pinned_specs)\n\n linked = {r.package_name(fn): fn[:-8] for fn in linked}\n\n actions = ensure_linked_actions(r.dependency_sort(nlinked), prefix)\n for old_fn in reversed(r.dependency_sort(linked)):\n dist = old_fn + '.tar.bz2'\n name = r.package_name(dist)\n if old_fn == nlinked.get(name, ''):\n continue\n if pinned and any(r.match(ms, dist) for ms in pinned_specs):\n msg = \"Cannot remove %s becaue it is pinned. 
Use --no-pin to override.\"\n raise RuntimeError(msg % dist)\n if name == 'conda' and name not in nlinked:\n if any(s.split(' ', 1)[0] == 'conda' for s in specs):\n sys.exit(\"Error: 'conda' cannot be removed from the root environment\")\n else:\n msg = (\"Error: this 'remove' command cannot be executed because it\\n\"\n \"would require removing 'conda' dependencies\")\n sys.exit(msg)\n add_unlink(actions, old_fn)\n\n return actions\n\n\ndef remove_features_actions(prefix, index, features):\n r = Resolve(index)\n linked = r.installed\n\n actions = defaultdict(list)\n actions[inst.PREFIX] = prefix\n _linked = [d + '.tar.bz2' for d in linked]\n to_link = []\n for dist in sorted(linked):\n fn = dist + '.tar.bz2'\n if fn not in index:\n continue\n if r.track_features(fn).intersection(features):\n add_unlink(actions, dist)\n if r.features(fn).intersection(features):\n add_unlink(actions, dist)\n subst = r.find_substitute(_linked, features, fn)\n if subst:\n to_link.append(subst[:-8])\n\n if to_link:\n actions.update(ensure_linked_actions(to_link, prefix))\n return actions\n\n\ndef revert_actions(prefix, revision=-1):\n h = History(prefix)\n h.update()\n try:\n state = h.get_state(revision)\n except IndexError:\n sys.exit(\"Error: no such revision: %d\" % revision)\n\n curr = h.get_state()\n if state == curr:\n return {}\n\n actions = ensure_linked_actions(state, prefix)\n for dist in curr - state:\n add_unlink(actions, dist)\n\n return actions\n\n# ---------------------------- EXECUTION --------------------------\n\n\ndef execute_actions(actions, index=None, verbose=False):\n plan = plan_from_actions(actions)\n with History(actions[inst.PREFIX]):\n inst.execute_instructions(plan, index, verbose)\n\n\ndef update_old_plan(old_plan):\n \"\"\"\n Update an old plan object to work with\n `conda.instructions.execute_instructions`\n \"\"\"\n plan = []\n for line in old_plan:\n if line.startswith('#'):\n continue\n if ' ' not in line:\n raise CondaException(\n \"The instruction '%s' takes at least one argument\" % line\n )\n\n instruction, arg = line.split(' ', 1)\n plan.append((instruction, arg))\n return plan\n\n\ndef execute_plan(old_plan, index=None, verbose=False):\n \"\"\"\n Deprecated: This should `conda.instructions.execute_instructions` instead\n \"\"\"\n plan = update_old_plan(old_plan)\n inst.execute_instructions(plan, index, verbose)\n\n\nif __name__ == '__main__':\n # for testing new revert_actions() only\n from pprint import pprint\n pprint(dict(revert_actions(sys.prefix, int(sys.argv[1]))))\n", "path": "conda/plan.py" } ]
diff --git a/conda/plan.py b/conda/plan.py index 401cc18cba9..e54a336fd13 100644 --- a/conda/plan.py +++ b/conda/plan.py @@ -442,6 +442,7 @@ def install_actions(prefix, index, specs, force=False, only_names=None, always_c if auto_update_conda and is_root_prefix(prefix): specs.append('conda') + specs.append('conda-env') if pinned: pinned_specs = get_pinned_specs(prefix) diff --git a/runtests.sh b/runtests.sh deleted file mode 100755 index 81de1d822f0..00000000000 --- a/runtests.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -echo "Running tests on any file change" -echo -echo "Hit Ctrl+C to stop" -echo -watchmedo shell-command \ - -c "py.test -m 'not slow' $@" \ - -p "*.py" -R diff --git a/sdist.sh b/sdist.sh deleted file mode 100644 index 2abc91d91eb..00000000000 --- a/sdist.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Use this bash script to generate the conda source tarball which gets -# uploaded to PyPI. - -VERSION=$(git describe --dirty) -echo "VERSION: '$VERSION'" - -echo $VERSION | grep dirty -if (( $? )); then - echo "CLEAN" -else - echo "DIRTY" - echo "Error: You must commit your changes before creating a tarball." - exit 1 -fi - -rm -rf build dist docs/build conda.egg-info -rm -f conda/_version.py* -cat <<EOF >conda/__init__.py -__version__ = '$VERSION' -EOF -rm versioneer.py -touch versioneer.py -replace 'version=versioneer.get_version(),' "version='$VERSION'," setup.py -replace 'cmdclass=versioneer.get_cmdclass(),' '' setup.py -replace 'add_activate = True' 'add_activate = False' setup.py -sdist -git reset --hard
mkdocs__mkdocs-2366
build --no-directory-urls feature not working
Hello. I created a new project using

```
mkdocs new .
mkdocs build --no-directory-urls
```

Still, when I try to visit the home page I am brought to the `.` directory instead of `.\index.html`. Sorry for some Russian text :D

![image](https://user-images.githubusercontent.com/3143647/114592047-3055ff80-9c93-11eb-9477-e1beabc8fa66.png)

I also tried to modify the .yml file like this: `use_directory_urls: false`. Still not working in "offline" mode.

```
pip show mkdocs
Name: mkdocs
Version: 1.1.2
Summary: Project documentation with Markdown.
Home-page: https://www.mkdocs.org
Author: Tom Christie
Author-email: [email protected]
License: BSD
Location: /home/demaunt/.local/lib/python3.8/site-packages
Requires: Markdown, lunr, Jinja2, PyYAML, click, livereload, tornado
Required-by: mkdocs-redirects, mkdocs-minify-plugin, mkdocs-material
```
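The underlying difference is in the URL mkdocs computes for the root `index.md`. The snippet below is only an illustration, not part of this report or of the mkdocs code base; it assumes the mkdocs 1.1 `File(path, src_dir, dest_dir, use_directory_urls)` constructor and its `url` attribute:

```python
from mkdocs.structure.files import File

# Same source page, two different URL styles depending on use_directory_urls.
with_dir_urls = File('index.md', 'docs', 'site', use_directory_urls=True)
without_dir_urls = File('index.md', 'docs', 'site', use_directory_urls=False)

print(with_dir_urls.url)     # '.'          -> matches the `file.url == '.'` homepage check
print(without_dir_urls.url)  # 'index.html' -> not recognised as the homepage before the fix
```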
[ { "content": "import os\nimport logging\nfrom urllib.parse import urlparse, urlunparse, urljoin\nfrom urllib.parse import unquote as urlunquote\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\nfrom markdown.util import AMP_SUBSTITUTE\n\nfrom mkdocs.structure.toc import get_toc\nfrom mkdocs.utils import meta, get_build_date, get_markdown_title\n\nlog = logging.getLogger(__name__)\n\n\nclass Page:\n def __init__(self, title, file, config):\n file.page = self\n self.file = file\n self.title = title\n\n # Navigation attributes\n self.parent = None\n self.children = None\n self.previous_page = None\n self.next_page = None\n self.active = False\n\n self.is_section = False\n self.is_page = True\n self.is_link = False\n\n self.update_date = get_build_date()\n\n self._set_canonical_url(config.get('site_url', None))\n self._set_edit_url(config.get('repo_url', None), config.get('edit_uri', None))\n\n # Placeholders to be filled in later in the build process.\n self.markdown = None\n self.content = None\n self.toc = []\n self.meta = {}\n\n def __eq__(self, other):\n return (\n isinstance(other, self.__class__) and\n self.title == other.title and\n self.file == other.file\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n title = \"'{}'\".format(self.title) if (self.title is not None) else '[blank]'\n return \"Page(title={}, url='{}')\".format(title, self.abs_url or self.file.url)\n\n def _indent_print(self, depth=0):\n return '{}{}'.format(' ' * depth, repr(self))\n\n def _get_active(self):\n \"\"\" Return active status of page. \"\"\"\n return self.__active\n\n def _set_active(self, value):\n \"\"\" Set active status of page and ancestors. \"\"\"\n self.__active = bool(value)\n if self.parent is not None:\n self.parent.active = bool(value)\n\n active = property(_get_active, _set_active)\n\n @property\n def is_index(self):\n return self.file.name == 'index'\n\n @property\n def is_top_level(self):\n return self.parent is None\n\n @property\n def is_homepage(self):\n return self.is_top_level and self.is_index and self.file.url == '.'\n\n @property\n def url(self):\n return '' if self.file.url == '.' 
else self.file.url\n\n @property\n def ancestors(self):\n if self.parent is None:\n return []\n return [self.parent] + self.parent.ancestors\n\n def _set_canonical_url(self, base):\n if base:\n if not base.endswith('/'):\n base += '/'\n self.canonical_url = urljoin(base, self.url)\n self.abs_url = urlparse(self.canonical_url).path\n else:\n self.canonical_url = None\n self.abs_url = None\n\n def _set_edit_url(self, repo_url, edit_uri):\n if repo_url and edit_uri:\n src_path = self.file.src_path.replace('\\\\', '/')\n self.edit_url = urljoin(repo_url, edit_uri + src_path)\n else:\n self.edit_url = None\n\n def read_source(self, config):\n source = config['plugins'].run_event(\n 'page_read_source', page=self, config=config\n )\n if source is None:\n try:\n with open(self.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:\n source = f.read()\n except OSError:\n log.error('File not found: {}'.format(self.file.src_path))\n raise\n except ValueError:\n log.error('Encoding error reading file: {}'.format(self.file.src_path))\n raise\n\n self.markdown, self.meta = meta.get_data(source)\n self._set_title()\n\n def _set_title(self):\n \"\"\"\n Set the title for a Markdown document.\n\n Check these in order and use the first that returns a valid title:\n - value provided on init (passed in from config)\n - value of metadata 'title'\n - content of the first H1 in Markdown content\n - convert filename to title\n \"\"\"\n if self.title is not None:\n return\n\n if 'title' in self.meta:\n self.title = self.meta['title']\n return\n\n title = get_markdown_title(self.markdown)\n\n if title is None:\n if self.is_homepage:\n title = 'Home'\n else:\n title = self.file.name.replace('-', ' ').replace('_', ' ')\n # Capitalize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n\n self.title = title\n\n def render(self, config, files):\n \"\"\"\n Convert the Markdown source file to HTML as per the config.\n \"\"\"\n\n extensions = [\n _RelativePathExtension(self.file, files)\n ] + config['markdown_extensions']\n\n md = markdown.Markdown(\n extensions=extensions,\n extension_configs=config['mdx_configs'] or {}\n )\n self.content = md.convert(self.markdown)\n self.toc = get_toc(getattr(md, 'toc_tokens', []))\n\n\nclass _RelativePathTreeprocessor(Treeprocessor):\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def run(self, root):\n \"\"\"\n Update urls on anchors and images to make them relative\n\n Iterates through the full document tree looking for specific\n tags and then makes them relative based on the site navigation\n \"\"\"\n for element in root.iter():\n if element.tag == 'a':\n key = 'href'\n elif element.tag == 'img':\n key = 'src'\n else:\n continue\n\n url = element.get(key)\n new_url = self.path_to_url(url)\n element.set(key, new_url)\n\n return root\n\n def path_to_url(self, url):\n scheme, netloc, path, params, query, fragment = urlparse(url)\n\n if (scheme or netloc or not path or url.startswith('/') or url.startswith('\\\\')\n or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):\n # Ignore URLs unless they are a relative link to a source file.\n # AMP_SUBSTITUTE is used internally by Markdown only for email.\n # No '.' 
in the last part of a path indicates path does not point to a file.\n return url\n\n # Determine the filepath of the target.\n target_path = os.path.join(os.path.dirname(self.file.src_path), urlunquote(path))\n target_path = os.path.normpath(target_path).lstrip(os.sep)\n\n # Validate that the target exists in files collection.\n if target_path not in self.files:\n log.warning(\n \"Documentation file '{}' contains a link to '{}' which is not found \"\n \"in the documentation files.\".format(self.file.src_path, target_path)\n )\n return url\n target_file = self.files.get_file_from_path(target_path)\n path = target_file.url_relative_to(self.file)\n components = (scheme, netloc, path, params, query, fragment)\n return urlunparse(components)\n\n\nclass _RelativePathExtension(Extension):\n \"\"\"\n The Extension class is what we pass to markdown, it then\n registers the Treeprocessor.\n \"\"\"\n\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def extendMarkdown(self, md):\n relpath = _RelativePathTreeprocessor(self.file, self.files)\n md.treeprocessors.register(relpath, \"relpath\", 0)\n", "path": "mkdocs/structure/pages.py" } ]
[ { "content": "import os\nimport logging\nfrom urllib.parse import urlparse, urlunparse, urljoin\nfrom urllib.parse import unquote as urlunquote\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\nfrom markdown.util import AMP_SUBSTITUTE\n\nfrom mkdocs.structure.toc import get_toc\nfrom mkdocs.utils import meta, get_build_date, get_markdown_title\n\nlog = logging.getLogger(__name__)\n\n\nclass Page:\n def __init__(self, title, file, config):\n file.page = self\n self.file = file\n self.title = title\n\n # Navigation attributes\n self.parent = None\n self.children = None\n self.previous_page = None\n self.next_page = None\n self.active = False\n\n self.is_section = False\n self.is_page = True\n self.is_link = False\n\n self.update_date = get_build_date()\n\n self._set_canonical_url(config.get('site_url', None))\n self._set_edit_url(config.get('repo_url', None), config.get('edit_uri', None))\n\n # Placeholders to be filled in later in the build process.\n self.markdown = None\n self.content = None\n self.toc = []\n self.meta = {}\n\n def __eq__(self, other):\n return (\n isinstance(other, self.__class__) and\n self.title == other.title and\n self.file == other.file\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n title = \"'{}'\".format(self.title) if (self.title is not None) else '[blank]'\n return \"Page(title={}, url='{}')\".format(title, self.abs_url or self.file.url)\n\n def _indent_print(self, depth=0):\n return '{}{}'.format(' ' * depth, repr(self))\n\n def _get_active(self):\n \"\"\" Return active status of page. \"\"\"\n return self.__active\n\n def _set_active(self, value):\n \"\"\" Set active status of page and ancestors. \"\"\"\n self.__active = bool(value)\n if self.parent is not None:\n self.parent.active = bool(value)\n\n active = property(_get_active, _set_active)\n\n @property\n def is_index(self):\n return self.file.name == 'index'\n\n @property\n def is_top_level(self):\n return self.parent is None\n\n @property\n def is_homepage(self):\n return self.is_top_level and self.is_index and self.file.url in ['.', 'index.html']\n\n @property\n def url(self):\n return '' if self.file.url == '.' 
else self.file.url\n\n @property\n def ancestors(self):\n if self.parent is None:\n return []\n return [self.parent] + self.parent.ancestors\n\n def _set_canonical_url(self, base):\n if base:\n if not base.endswith('/'):\n base += '/'\n self.canonical_url = urljoin(base, self.url)\n self.abs_url = urlparse(self.canonical_url).path\n else:\n self.canonical_url = None\n self.abs_url = None\n\n def _set_edit_url(self, repo_url, edit_uri):\n if repo_url and edit_uri:\n src_path = self.file.src_path.replace('\\\\', '/')\n self.edit_url = urljoin(repo_url, edit_uri + src_path)\n else:\n self.edit_url = None\n\n def read_source(self, config):\n source = config['plugins'].run_event(\n 'page_read_source', page=self, config=config\n )\n if source is None:\n try:\n with open(self.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:\n source = f.read()\n except OSError:\n log.error('File not found: {}'.format(self.file.src_path))\n raise\n except ValueError:\n log.error('Encoding error reading file: {}'.format(self.file.src_path))\n raise\n\n self.markdown, self.meta = meta.get_data(source)\n self._set_title()\n\n def _set_title(self):\n \"\"\"\n Set the title for a Markdown document.\n\n Check these in order and use the first that returns a valid title:\n - value provided on init (passed in from config)\n - value of metadata 'title'\n - content of the first H1 in Markdown content\n - convert filename to title\n \"\"\"\n if self.title is not None:\n return\n\n if 'title' in self.meta:\n self.title = self.meta['title']\n return\n\n title = get_markdown_title(self.markdown)\n\n if title is None:\n if self.is_homepage:\n title = 'Home'\n else:\n title = self.file.name.replace('-', ' ').replace('_', ' ')\n # Capitalize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n\n self.title = title\n\n def render(self, config, files):\n \"\"\"\n Convert the Markdown source file to HTML as per the config.\n \"\"\"\n\n extensions = [\n _RelativePathExtension(self.file, files)\n ] + config['markdown_extensions']\n\n md = markdown.Markdown(\n extensions=extensions,\n extension_configs=config['mdx_configs'] or {}\n )\n self.content = md.convert(self.markdown)\n self.toc = get_toc(getattr(md, 'toc_tokens', []))\n\n\nclass _RelativePathTreeprocessor(Treeprocessor):\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def run(self, root):\n \"\"\"\n Update urls on anchors and images to make them relative\n\n Iterates through the full document tree looking for specific\n tags and then makes them relative based on the site navigation\n \"\"\"\n for element in root.iter():\n if element.tag == 'a':\n key = 'href'\n elif element.tag == 'img':\n key = 'src'\n else:\n continue\n\n url = element.get(key)\n new_url = self.path_to_url(url)\n element.set(key, new_url)\n\n return root\n\n def path_to_url(self, url):\n scheme, netloc, path, params, query, fragment = urlparse(url)\n\n if (scheme or netloc or not path or url.startswith('/') or url.startswith('\\\\')\n or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):\n # Ignore URLs unless they are a relative link to a source file.\n # AMP_SUBSTITUTE is used internally by Markdown only for email.\n # No '.' 
in the last part of a path indicates path does not point to a file.\n return url\n\n # Determine the filepath of the target.\n target_path = os.path.join(os.path.dirname(self.file.src_path), urlunquote(path))\n target_path = os.path.normpath(target_path).lstrip(os.sep)\n\n # Validate that the target exists in files collection.\n if target_path not in self.files:\n log.warning(\n \"Documentation file '{}' contains a link to '{}' which is not found \"\n \"in the documentation files.\".format(self.file.src_path, target_path)\n )\n return url\n target_file = self.files.get_file_from_path(target_path)\n path = target_file.url_relative_to(self.file)\n components = (scheme, netloc, path, params, query, fragment)\n return urlunparse(components)\n\n\nclass _RelativePathExtension(Extension):\n \"\"\"\n The Extension class is what we pass to markdown, it then\n registers the Treeprocessor.\n \"\"\"\n\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def extendMarkdown(self, md):\n relpath = _RelativePathTreeprocessor(self.file, self.files)\n md.treeprocessors.register(relpath, \"relpath\", 0)\n", "path": "mkdocs/structure/pages.py" } ]
diff --git a/docs/about/release-notes.md b/docs/about/release-notes.md index 1258587f87..2c160dc367 100644 --- a/docs/about/release-notes.md +++ b/docs/about/release-notes.md @@ -131,6 +131,7 @@ The `mkdocs.utils.warning_filter` is deprecated and now does nothing. Plugins the corresponding 2.3.9 version (#2306). * Color is now used in log messages to identify errors, warnings and debug messages. +* Bugfix: Identify homepage when `use_directory_urls` is `False` (#2362). ## Version 1.1.2 (2020-05-14) diff --git a/mkdocs/structure/pages.py b/mkdocs/structure/pages.py index 27d2867fcd..410410db91 100644 --- a/mkdocs/structure/pages.py +++ b/mkdocs/structure/pages.py @@ -81,7 +81,7 @@ def is_top_level(self): @property def is_homepage(self): - return self.is_top_level and self.is_index and self.file.url == '.' + return self.is_top_level and self.is_index and self.file.url in ['.', 'index.html'] @property def url(self): diff --git a/mkdocs/tests/structure/nav_tests.py b/mkdocs/tests/structure/nav_tests.py index 188bf3db9f..8184923680 100644 --- a/mkdocs/tests/structure/nav_tests.py +++ b/mkdocs/tests/structure/nav_tests.py @@ -51,6 +51,7 @@ def test_nav_no_directory_urls(self): self.assertEqual(str(site_navigation).strip(), expected) self.assertEqual(len(site_navigation.items), 2) self.assertEqual(len(site_navigation.pages), 2) + self.assertEqual(repr(site_navigation.homepage), "Page(title='Home', url='/index.html')") def test_nav_missing_page(self): nav_cfg = [
scikit-hep__pyhf-2068
docs build failing on Pygments lexer warning
Hm. Something related to https://github.com/spatialaudio/nbsphinx/issues/24 is breaking the docs build. We're getting

```pytb
WARNING: Pygments lexer name 'ipython3' is not known
```

for all the notebooks during the docs build and we fail on warnings.

_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/2066#issuecomment-1329937208_
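A quick way to see whether that warning will fire is to check if Pygments can resolve the `ipython3` lexer (provided by IPython's Pygments plugin) in the documentation build environment. This is only an illustrative check, not code from the pyhf repository:

```python
# If the "ipython3" lexer cannot be loaded (for example under the ipython==8.7.0
# release that the fix in this record ends up excluding), Sphinx/nbsphinx fall
# back to the warning quoted above.
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound

try:
    get_lexer_by_name("ipython3")
    print("ipython3 lexer is registered")
except ClassNotFound:
    print("ipython3 lexer is missing -> 'Pygments lexer name ipython3 is not known'")
```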
[ { "content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.7.0', # c.f. PR #1962\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.61,!=0.1.68'], # c.f. PR #1962, Issue #1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.7.0'], # c.f. PR #1895\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'coverage[toml]>=6.0.0',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.3.4',\n 'scrapbook~=0.5.0',\n 'jupyter',\n 'graphviz',\n 'pytest-socket>=0.2.0', # c.f. PR #1917\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=5.1.1', # c.f. https://github.com/scikit-hep/pyhf/pull/1926\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n 'sphinx-togglebutton>=0.3.0',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'nox',\n 'check-manifest',\n 'codemetapy>=2.3.0',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.7.0', # c.f. PR #1962\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.61,!=0.1.68'], # c.f. PR #1962, Issue #1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.7.0'], # c.f. PR #1895\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'coverage[toml]>=6.0.0',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.3.4',\n 'scrapbook~=0.5.0',\n 'jupyter',\n 'graphviz',\n 'pytest-socket>=0.2.0', # c.f. PR #1917\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=5.1.1', # c.f. https://github.com/scikit-hep/pyhf/pull/1926\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n 'sphinx-togglebutton>=0.3.0',\n 'ipython!=8.7.0', # c.f. https://github.com/scikit-hep/pyhf/pull/2068\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'nox',\n 'check-manifest',\n 'codemetapy>=2.3.0',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py
index a0c48627f5..167ce598ad 100644
--- a/setup.py
+++ b/setup.py
@@ -58,6 +58,7 @@
             'sphinx-issues',
             'sphinx-copybutton>=0.3.2',
             'sphinx-togglebutton>=0.3.0',
+            'ipython!=8.7.0',  # c.f. https://github.com/scikit-hep/pyhf/pull/2068
         ]
     )
 )
openstates__openstates-scrapers-2283
OK failing since at least 2018-05-06
OK has been failing since 2018-05-06

Based on automated runs it appears that OK has not run successfully in 2 days (2018-05-06).

```
  /opt/openstates/venv-pupa/lib/python3.5/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use "pip install psycopg2-binary" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.
  """)
01:03:53 CRITICAL pupa: Session(s) 2019 Regular Session were reported by Oklahoma.get_session_list() but were not found in Oklahoma.legislative_sessions or Oklahoma.ignored_scraped_sessions.
loaded Open States pupa settings...
ok (scrape, import)
  bills: {}
  people: {}
  committees: {}
```

Visit http://bobsled.openstates.org for more info.
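The CRITICAL line above is pupa's session bookkeeping: every name returned by `get_session_list()` must appear either as a `_scraped_name` in `legislative_sessions` or in `ignored_scraped_sessions`. Roughly, as an illustrative sketch only (this is not pupa's actual implementation):

```python
from openstates.ok import Oklahoma

scraped = set(Oklahoma().get_session_list())   # now includes "2019 Regular Session"
known = {s["_scraped_name"] for s in Oklahoma.legislative_sessions}
known |= set(Oklahoma.ignored_scraped_sessions)

unaccounted = scraped - known
if unaccounted:
    # This is the condition behind the CRITICAL message; listing
    # "2019 Regular Session" under ignored_scraped_sessions clears it.
    print("Session(s) %s were reported but not found" % ", ".join(sorted(unaccounted)))
```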
[ { "content": "from pupa.scrape import Jurisdiction, Organization\nfrom .people import OKPersonScraper\nfrom .committees import OKCommitteeScraper\n# from .events import OKEventScraper\nfrom .bills import OKBillScraper\n\n\nclass Oklahoma(Jurisdiction):\n division_id = \"ocd-division/country:us/state:ok\"\n classification = \"government\"\n name = \"Oklahoma\"\n url = \"http://www.oklegislature.gov/\"\n scrapers = {\n 'people': OKPersonScraper,\n 'committees': OKCommitteeScraper,\n # 'events': OKEventScraper,\n 'bills': OKBillScraper,\n }\n # Sessions are named on OK's website as \"{odd year} regular session\" until the even year,\n # when all data rolls over. For example, even year sessions include all odd-year-session bills.\n # We have opted to name sessions {odd-even} Regular Session and treat them as such.\n # - If adding a new odd-year session, add a new entry and copy the biennium pattern as above\n # - If adding an even-year session, all you'll need to do is:\n # - update the `_scraped_name`\n # - update the session slug in the Bill scraper\n # - ignore the odd-year session\n legislative_sessions = [\n {\n \"_scraped_name\": \"2012 Regular Session\",\n \"identifier\": \"2011-2012\",\n \"name\": \"2011-2012 Regular Session\"\n },\n {\n \"_scraped_name\": \"2012 Special Session\",\n \"identifier\": \"2012SS1\",\n \"name\": \"2012 Special Session\"\n },\n {\n \"_scraped_name\": \"2014 Regular Session\",\n \"identifier\": \"2013-2014\",\n \"name\": \"2013-2014 Regular Session\"\n },\n {\n \"_scraped_name\": \"2013 Special Session\",\n \"identifier\": \"2013SS1\",\n \"name\": \"2013 Special Session\"\n },\n {\n \"_scraped_name\": \"2016 Regular Session\",\n \"identifier\": \"2015-2016\",\n \"name\": \"2015-2016 Regular Session\"\n },\n {\n \"_scraped_name\": \"2017 First Special Session\",\n \"identifier\": \"2017SS1\",\n \"name\": \"2017 First Special Session\"\n },\n {\n \"_scraped_name\": \"2017 Second Special Session\",\n \"identifier\": \"2017SS2\",\n \"name\": \"2017 Second Special Session\"\n },\n {\n \"_scraped_name\": \"2018 Regular Session\",\n \"identifier\": \"2017-2018\",\n \"name\": \"2017-2018 Regular Session\",\n \"start_date\": \"2017-02-06\",\n \"end_date\": \"2018-05-25\",\n },\n ]\n ignored_scraped_sessions = [\n \"2017 Regular Session\",\n \"2015 Regular Session\",\n \"2013 Regular Session\",\n \"2011 Regular Session\",\n \"2010 Regular Session\",\n \"2009 Regular Session\",\n \"2008 Regular Session\",\n \"2007 Regular Session\",\n \"2006 Second Special Session\",\n \"2006 Regular Session\",\n \"2005 Special Session\",\n \"2005 Regular Session\",\n \"2004 Special Session\",\n \"2004 Regular Session\",\n \"2003 Regular Session\",\n \"2002 Regular Session\",\n \"2001 Special Session\",\n \"2001 Regular Session\",\n \"2000 Regular Session\",\n \"1999 Special Session\",\n \"1999 Regular Session\",\n \"1998 Regular Session\",\n \"1997 Regular Session\",\n \"1996 Regular Session\",\n \"1995 Regular Session\",\n \"1994 Second Special Session\",\n \"1994 First Special Session\",\n \"1994 Regular Session\",\n \"1993 Regular Session\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Oklahoma Legislature\"\n lower_chamber_name = \"House\"\n lower_seats = 101\n lower_title = \"Senator\"\n upper_chamber_name = \"Senate\"\n upper_seats = 48\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = 
Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n from openstates.utils import url_xpath\n sessions = url_xpath('http://webserver1.lsb.state.ok.us/WebApplication2/WebForm1.aspx',\n \"//select[@name='cbxSession']/option/text()\")\n # OK Sometimes appends (Mainsys) to their session listings\n sessions = [s.replace('(Mainsys)', '').strip() for s in sessions]\n return sessions\n", "path": "openstates/ok/__init__.py" } ]
[ { "content": "from pupa.scrape import Jurisdiction, Organization\nfrom .people import OKPersonScraper\nfrom .committees import OKCommitteeScraper\n# from .events import OKEventScraper\nfrom .bills import OKBillScraper\n\n\nclass Oklahoma(Jurisdiction):\n division_id = \"ocd-division/country:us/state:ok\"\n classification = \"government\"\n name = \"Oklahoma\"\n url = \"http://www.oklegislature.gov/\"\n scrapers = {\n 'people': OKPersonScraper,\n 'committees': OKCommitteeScraper,\n # 'events': OKEventScraper,\n 'bills': OKBillScraper,\n }\n # Sessions are named on OK's website as \"{odd year} regular session\" until the even year,\n # when all data rolls over. For example, even year sessions include all odd-year-session bills.\n # We have opted to name sessions {odd-even} Regular Session and treat them as such.\n # - If adding a new odd-year session, add a new entry and copy the biennium pattern as above\n # - If adding an even-year session, all you'll need to do is:\n # - update the `_scraped_name`\n # - update the session slug in the Bill scraper\n # - ignore the odd-year session\n legislative_sessions = [\n {\n \"_scraped_name\": \"2012 Regular Session\",\n \"identifier\": \"2011-2012\",\n \"name\": \"2011-2012 Regular Session\"\n },\n {\n \"_scraped_name\": \"2012 Special Session\",\n \"identifier\": \"2012SS1\",\n \"name\": \"2012 Special Session\"\n },\n {\n \"_scraped_name\": \"2014 Regular Session\",\n \"identifier\": \"2013-2014\",\n \"name\": \"2013-2014 Regular Session\"\n },\n {\n \"_scraped_name\": \"2013 Special Session\",\n \"identifier\": \"2013SS1\",\n \"name\": \"2013 Special Session\"\n },\n {\n \"_scraped_name\": \"2016 Regular Session\",\n \"identifier\": \"2015-2016\",\n \"name\": \"2015-2016 Regular Session\"\n },\n {\n \"_scraped_name\": \"2017 First Special Session\",\n \"identifier\": \"2017SS1\",\n \"name\": \"2017 First Special Session\"\n },\n {\n \"_scraped_name\": \"2017 Second Special Session\",\n \"identifier\": \"2017SS2\",\n \"name\": \"2017 Second Special Session\"\n },\n {\n \"_scraped_name\": \"2018 Regular Session\",\n \"identifier\": \"2017-2018\",\n \"name\": \"2017-2018 Regular Session\",\n \"start_date\": \"2017-02-06\",\n \"end_date\": \"2018-05-25\",\n },\n ]\n ignored_scraped_sessions = [\n \"2019 Regular Session\",\n \"2017 Regular Session\",\n \"2015 Regular Session\",\n \"2013 Regular Session\",\n \"2011 Regular Session\",\n \"2010 Regular Session\",\n \"2009 Regular Session\",\n \"2008 Regular Session\",\n \"2007 Regular Session\",\n \"2006 Second Special Session\",\n \"2006 Regular Session\",\n \"2005 Special Session\",\n \"2005 Regular Session\",\n \"2004 Special Session\",\n \"2004 Regular Session\",\n \"2003 Regular Session\",\n \"2002 Regular Session\",\n \"2001 Special Session\",\n \"2001 Regular Session\",\n \"2000 Regular Session\",\n \"1999 Special Session\",\n \"1999 Regular Session\",\n \"1998 Regular Session\",\n \"1997 Regular Session\",\n \"1996 Regular Session\",\n \"1995 Regular Session\",\n \"1994 Second Special Session\",\n \"1994 First Special Session\",\n \"1994 Regular Session\",\n \"1993 Regular Session\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Oklahoma Legislature\"\n lower_chamber_name = \"House\"\n lower_seats = 101\n lower_title = \"Senator\"\n upper_chamber_name = \"Senate\"\n upper_seats = 48\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n 
parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n from openstates.utils import url_xpath\n sessions = url_xpath('http://webserver1.lsb.state.ok.us/WebApplication2/WebForm1.aspx',\n \"//select[@name='cbxSession']/option/text()\")\n # OK Sometimes appends (Mainsys) to their session listings\n sessions = [s.replace('(Mainsys)', '').strip() for s in sessions]\n return sessions\n", "path": "openstates/ok/__init__.py" } ]
diff --git a/openstates/ok/__init__.py b/openstates/ok/__init__.py
index c4d7e3772c..2159828080 100644
--- a/openstates/ok/__init__.py
+++ b/openstates/ok/__init__.py
@@ -69,6 +69,7 @@ class Oklahoma(Jurisdiction):
         },
     ]
     ignored_scraped_sessions = [
+        "2019 Regular Session",
         "2017 Regular Session",
         "2015 Regular Session",
         "2013 Regular Session",
opsdroid__opsdroid-1504
Cisco WebEx Teams connector doesn't start
# Error

```
INFO opsdroid.logging: ========================================
INFO opsdroid.logging: Started opsdroid 0+unknown.
WARNING opsdroid: 'welcome-message: true/false' is missing in configuration.yaml
WARNING opsdroid.loader: No databases in configuration. This will cause skills which store things in memory to lose data when opsdroid is restarted.
INFO opsdroid.loader: Cloning hello from remote repository.
Traceback (most recent call last):
  File "/usr/local/bin/opsdroid", line 8, in <module>
    sys.exit(cli())
  File "/usr/local/lib/python3.7/site-packages/click/core.py", line 829, in __call__
    return self.main(*args, **kwargs)
  File "/usr/local/lib/python3.7/site-packages/click/core.py", line 782, in main
    rv = self.invoke(ctx)
  File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
    return _process_result(sub_ctx.command.invoke(sub_ctx))
  File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/usr/local/lib/python3.7/site-packages/click/core.py", line 610, in invoke
    return callback(*args, **kwargs)
  File "/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py", line 42, in start
    opsdroid.run()
  File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 158, in run
    self.sync_load()
  File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 177, in sync_load
    self.eventloop.run_until_complete(self.load())
  File "/usr/local/lib/python3.7/asyncio/base_events.py", line 587, in run_until_complete
    return future.result()
  File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 185, in load
    await self.start_connectors(self.modules["connectors"])
  File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 319, in start_connectors
    await self.eventloop.create_task(connector.connect())
  File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/webexteams/__init__.py", line 53, in connect
    await self.subscribe_to_rooms()
  File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/webexteams/__init__.py", line 99, in subscribe_to_rooms
    secret=self.secret,
  File "/usr/local/lib/python3.7/site-packages/webexteamssdk/api/webhooks.py", line 159, in create
    json_data = self._session.post(API_ENDPOINT, json=post_data)
  File "/usr/local/lib/python3.7/site-packages/webexteamssdk/restsession.py", line 401, in post
    **kwargs)
  File "/usr/local/lib/python3.7/site-packages/webexteamssdk/restsession.py", line 258, in request
    check_response_code(response, erc)
  File "/usr/local/lib/python3.7/site-packages/webexteamssdk/utils.py", line 220, in check_response_code
    raise ApiError(response)
webexteamssdk.exceptions.ApiError: [400] Bad Request - POST failed: HTTP/1.1 400 Bad Request (url = https://webhook-engine-a.wbx2.com/webhook-engine/api/v1/webhooks, request/response TrackingId = ROUTER_5ECD21B0-63B3-01BB-00D6-B2CAA80F00D6, error = 'Invalid targetUrl: Illegal character in path at index 0: <function Url at 0x7fd36ce31f80>/connector/webexteams')
```

# How to reproduce

* Create `configuration.yaml` with the following content:

```
connectors:
  webexteams:
    token: MYBOTACCESSTOKEN
    webhook-url: https://my-webhook-url.com # Seem that webhook-url is not relevant for the error message

skills:
  hello:
```

* Create `debug.sh` with the following content:

```
docker run --rm -ti -p 8080:8080 \
  -v `pwd`/configuration.yaml:/root/.config/opsdroid/configuration.yaml:ro \
  opsdroid/opsdroid:v0.18.0 sh
```

* `chmod +x debug.sh`
* `./debug.sh`
* (in the container) `opsdroid start`
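The `<function Url at 0x...>` fragment in the rejected `targetUrl` is the tell: the connector's config schema used voluptuous's `Url` as a bare callable instead of instantiating it, so validation handed back a function object in place of the configured URL. A minimal sketch of the difference (illustrative only; it assumes voluptuous's `Url` acts as a validator factory, which is what the fix in this record relies on):

```python
from voluptuous import Required, Schema, Url

data = {"webhook-url": "https://example.com"}

# Url() builds the actual validator: the configured value passes through unchanged.
good = Schema({Required("webhook-url"): Url()})
print(good(data)["webhook-url"])   # https://example.com

# Bare Url makes voluptuous call Url(<value>), which returns a validator function
# rather than the string -- that function repr is what ends up inside the webhook
# targetUrl and triggers the 400 above.
bad = Schema({Required("webhook-url"): Url})
print(bad(data)["webhook-url"])    # <function Url at 0x...>
```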
[ { "content": "\"\"\"A connector for Webex Teams.\"\"\"\nimport json\nimport logging\nimport uuid\nimport os\n\nimport aiohttp\n\nfrom webexteamssdk import WebexTeamsAPI\nfrom voluptuous import Required, Url\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {Required(\"webhook-url\"): Url, Required(\"token\"): str}\n\n\nclass ConnectorWebexTeams(Connector):\n \"\"\"A connector for Webex Teams.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create a connector.\"\"\"\n _LOGGER.debug(_(\"Loaded WebEx Teams Connector.\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"webexteams\"\n self.config = config\n self.opsdroid = opsdroid\n self.default_target = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.bot_webex_id = None\n self.secret = uuid.uuid4().hex\n self.people = {}\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n try:\n self.api = WebexTeamsAPI(\n access_token=self.config[\"token\"],\n proxies={\n \"http\": os.environ.get(\"HTTP_PROXY\"),\n \"https\": os.environ.get(\"HTTPS_PROXY\"),\n },\n )\n except KeyError:\n\n _LOGGER.error(_(\"Must set access-token for WebEx Teams Connector.\"))\n\n return\n\n await self.clean_up_webhooks()\n await self.subscribe_to_rooms()\n await self.set_own_id()\n\n async def webexteams_message_handler(self, request):\n \"\"\"Handle webhooks from the Webex Teams api.\"\"\"\n _LOGGER.debug(_(\"Handling message from WebEx Teams.\"))\n req_data = await request.json()\n\n _LOGGER.debug(req_data)\n\n msg = self.api.messages.get(req_data[\"data\"][\"id\"])\n\n if req_data[\"data\"][\"personId\"] != self.bot_webex_id:\n person = await self.get_person(req_data[\"data\"][\"personId\"])\n\n try:\n message = Message(\n text=msg.text,\n user=person.displayName,\n target={\"id\": msg.roomId, \"type\": msg.roomType},\n connector=self,\n )\n await self.opsdroid.parse(message)\n except KeyError as error:\n _LOGGER.error(error)\n\n return aiohttp.web.Response(text=json.dumps(\"Received\"), status=201)\n\n async def clean_up_webhooks(self):\n \"\"\"Remove all existing webhooks.\"\"\"\n for webhook in self.api.webhooks.list():\n self.api.webhooks.delete(webhook.id)\n\n async def subscribe_to_rooms(self):\n \"\"\"Create webhooks for all rooms.\"\"\"\n _LOGGER.debug(_(\"Creating Webex Teams webhook.\"))\n webhook_endpoint = \"/connector/webexteams\"\n self.opsdroid.web_server.web_app.router.add_post(\n webhook_endpoint, self.webexteams_message_handler\n )\n\n self.api.webhooks.create(\n name=\"opsdroid\",\n targetUrl=\"{}{}\".format(self.config.get(\"webhook-url\"), webhook_endpoint),\n resource=\"messages\",\n event=\"created\",\n secret=self.secret,\n )\n\n async def get_person(self, personId):\n \"\"\"Get a person's info from the api or cache.\"\"\"\n if personId not in self.people:\n self.people[personId] = self.api.people.get(personId)\n return self.people[personId]\n\n async def set_own_id(self):\n \"\"\"Get the bot id and set it in the class.\"\"\"\n self.bot_webex_id = self.api.people.me().id\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n pass # Listening is handled by the aiohttp web server\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n self.api.messages.create(message.target[\"id\"], text=message.text)\n", "path": "opsdroid/connector/webexteams/__init__.py" } ]
[ { "content": "\"\"\"A connector for Webex Teams.\"\"\"\nimport json\nimport logging\nimport uuid\nimport os\n\nimport aiohttp\n\nfrom webexteamssdk import WebexTeamsAPI\nfrom voluptuous import Required, Url\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {Required(\"webhook-url\"): Url(), Required(\"token\"): str}\n\n\nclass ConnectorWebexTeams(Connector):\n \"\"\"A connector for Webex Teams.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create a connector.\"\"\"\n _LOGGER.debug(_(\"Loaded WebEx Teams Connector.\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"webexteams\"\n self.config = config\n self.opsdroid = opsdroid\n self.default_target = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.bot_webex_id = None\n self.secret = uuid.uuid4().hex\n self.people = {}\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n try:\n self.api = WebexTeamsAPI(\n access_token=self.config[\"token\"],\n proxies={\n \"http\": os.environ.get(\"HTTP_PROXY\"),\n \"https\": os.environ.get(\"HTTPS_PROXY\"),\n },\n )\n except KeyError:\n\n _LOGGER.error(_(\"Must set access-token for WebEx Teams Connector.\"))\n\n return\n\n await self.clean_up_webhooks()\n await self.subscribe_to_rooms()\n await self.set_own_id()\n\n async def webexteams_message_handler(self, request):\n \"\"\"Handle webhooks from the Webex Teams api.\"\"\"\n _LOGGER.debug(_(\"Handling message from WebEx Teams.\"))\n req_data = await request.json()\n\n _LOGGER.debug(req_data)\n\n msg = self.api.messages.get(req_data[\"data\"][\"id\"])\n\n if req_data[\"data\"][\"personId\"] != self.bot_webex_id:\n person = await self.get_person(req_data[\"data\"][\"personId\"])\n\n try:\n message = Message(\n text=msg.text,\n user=person.displayName,\n target={\"id\": msg.roomId, \"type\": msg.roomType},\n connector=self,\n )\n await self.opsdroid.parse(message)\n except KeyError as error:\n _LOGGER.error(error)\n\n return aiohttp.web.Response(text=json.dumps(\"Received\"), status=201)\n\n async def clean_up_webhooks(self):\n \"\"\"Remove all existing webhooks.\"\"\"\n for webhook in self.api.webhooks.list():\n self.api.webhooks.delete(webhook.id)\n\n async def subscribe_to_rooms(self):\n \"\"\"Create webhooks for all rooms.\"\"\"\n _LOGGER.debug(_(\"Creating Webex Teams webhook.\"))\n webhook_endpoint = \"/connector/webexteams\"\n self.opsdroid.web_server.web_app.router.add_post(\n webhook_endpoint, self.webexteams_message_handler\n )\n\n self.api.webhooks.create(\n name=\"opsdroid\",\n targetUrl=\"{}{}\".format(self.config.get(\"webhook-url\"), webhook_endpoint),\n resource=\"messages\",\n event=\"created\",\n secret=self.secret,\n )\n\n async def get_person(self, personId):\n \"\"\"Get a person's info from the api or cache.\"\"\"\n if personId not in self.people:\n self.people[personId] = self.api.people.get(personId)\n return self.people[personId]\n\n async def set_own_id(self):\n \"\"\"Get the bot id and set it in the class.\"\"\"\n self.bot_webex_id = self.api.people.me().id\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n pass # Listening is handled by the aiohttp web server\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n self.api.messages.create(message.target[\"id\"], text=message.text)\n", "path": "opsdroid/connector/webexteams/__init__.py" } ]
diff --git a/opsdroid/connector/webexteams/__init__.py b/opsdroid/connector/webexteams/__init__.py
index bb94ec2c9..eda32060a 100644
--- a/opsdroid/connector/webexteams/__init__.py
+++ b/opsdroid/connector/webexteams/__init__.py
@@ -14,7 +14,7 @@
 
 
 _LOGGER = logging.getLogger(__name__)
-CONFIG_SCHEMA = {Required("webhook-url"): Url, Required("token"): str}
+CONFIG_SCHEMA = {Required("webhook-url"): Url(), Required("token"): str}
 
 
 class ConnectorWebexTeams(Connector):
diff --git a/tests/test_connector_webexteams.py b/tests/test_connector_webexteams.py
index 0c54bac93..73b5c18c8 100755
--- a/tests/test_connector_webexteams.py
+++ b/tests/test_connector_webexteams.py
@@ -24,6 +24,10 @@ def test_init(self):
         self.assertEqual("webexteams", connector.name)
         self.assertEqual("opsdroid", connector.bot_name)
 
+    def test_webhook_url_is_valid(self):
+        connector = ConnectorWebexTeams({"webhook-url": "https://example.com"})
+        assert connector.config.get("webhook-url").startswith("https")
+
     def test_missing_api_key(self):
         """Test that creating without an API without config raises an error."""
         with self.assertRaises(TypeError):
sunpy__sunpy-2561
Document the CI
DOCUMENT ALL THE THINGS
[ { "content": "from __future__ import absolute_import\n\nfrom sunpy.net.attr import AttrWalker, AttrAnd, AttrOr\nfrom sunpy.net.vso.attrs import _VSOSimpleAttr\nfrom sunpy.net.vso.attrs import Time, Wavelength\n\n\n__all__ = ['Series', 'Protocol', 'Notify', 'Compression', 'Segment']\n\n\nclass Series(_VSOSimpleAttr):\n \"\"\"\n The JSOC Series to Download.\n\n See `this<http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_`\n for a list of series'.\n \"\"\"\n pass\n\n\nclass Segment(_VSOSimpleAttr):\n \"\"\"\n Segments choose which files to download when there are more than\n one present for each record e.g. 'image'\n \"\"\"\n pass\n\n\nclass Protocol(_VSOSimpleAttr):\n \"\"\"\n The type of download to request one of\n (\"FITS\", \"JPEG\", \"MPG\", \"MP4\", or \"as-is\").\n Only FITS is supported, the others will require extra keywords.\n \"\"\"\n pass\n\n\nclass Notify(_VSOSimpleAttr):\n \"\"\"\n An email address to get a notification to when JSOC has staged your request\n \"\"\"\n\n def __init__(self, value):\n super(Notify, self).__init__(value)\n if value.find('@') == -1:\n raise ValueError(\"Notify attribute must contain an '@' symbol \"\n \"to be a valid email address\")\n self.value = value\n\n\nclass Compression(_VSOSimpleAttr):\n \"\"\"\n Compression format for requested files.\n\n 'rice' or None, download FITS files with RICE compression.\n \"\"\"\n pass\n\n\nwalker = AttrWalker()\n\n\[email protected]_creator(AttrAnd, _VSOSimpleAttr, Time)\ndef _create(wlk, query):\n\n map_ = {}\n wlk.apply(query, map_)\n return [map_]\n\n\[email protected]_applier(AttrAnd)\ndef _apply(wlk, query, imap):\n\n for iattr in query.attrs:\n wlk.apply(iattr, imap)\n\n\[email protected]_applier(_VSOSimpleAttr)\ndef _apply1(wlk, query, imap):\n\n imap[query.__class__.__name__.lower()] = query.value\n\n\[email protected]_applier(Time)\ndef _apply2(wlk, query, imap):\n imap['start_time'] = query.start\n imap['end_time'] = query.end\n\n\[email protected]_applier(Wavelength)\ndef _apply_wave(wlk, query, imap):\n if query.min != query.max:\n raise ValueError(\n \"For JSOC queries Wavelength.min must equal Wavelength.max\")\n\n imap[query.__class__.__name__.lower()] = query.min\n\n\[email protected]_creator(AttrOr)\ndef _create1(wlk, query):\n\n qblocks = []\n for iattr in query.attrs:\n qblocks.extend(wlk.create(iattr))\n\n return qblocks\n", "path": "sunpy/net/jsoc/attrs.py" } ]
[ { "content": "from __future__ import absolute_import\n\nfrom sunpy.net.attr import AttrWalker, AttrAnd, AttrOr\nfrom sunpy.net.vso.attrs import _VSOSimpleAttr\nfrom sunpy.net.vso.attrs import Time, Wavelength\n\n\n__all__ = ['Series', 'Protocol', 'Notify', 'Compression', 'Segment']\n\n\nclass Series(_VSOSimpleAttr):\n \"\"\"\n The JSOC Series to Download.\n\n This is the list of `Series <http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_`.\n \"\"\"\n pass\n\n\nclass Segment(_VSOSimpleAttr):\n \"\"\"\n Segments choose which files to download when there are more than\n one present for each record e.g. 'image'\n \"\"\"\n pass\n\n\nclass Protocol(_VSOSimpleAttr):\n \"\"\"\n The type of download to request one of\n (\"FITS\", \"JPEG\", \"MPG\", \"MP4\", or \"as-is\").\n Only FITS is supported, the others will require extra keywords.\n \"\"\"\n pass\n\n\nclass Notify(_VSOSimpleAttr):\n \"\"\"\n An email address to get a notification to when JSOC has staged your request\n \"\"\"\n\n def __init__(self, value):\n super(Notify, self).__init__(value)\n if value.find('@') == -1:\n raise ValueError(\"Notify attribute must contain an '@' symbol \"\n \"to be a valid email address\")\n self.value = value\n\n\nclass Compression(_VSOSimpleAttr):\n \"\"\"\n Compression format for requested files.\n\n 'rice' or None, download FITS files with RICE compression.\n \"\"\"\n pass\n\n\nwalker = AttrWalker()\n\n\[email protected]_creator(AttrAnd, _VSOSimpleAttr, Time)\ndef _create(wlk, query):\n\n map_ = {}\n wlk.apply(query, map_)\n return [map_]\n\n\[email protected]_applier(AttrAnd)\ndef _apply(wlk, query, imap):\n\n for iattr in query.attrs:\n wlk.apply(iattr, imap)\n\n\[email protected]_applier(_VSOSimpleAttr)\ndef _apply1(wlk, query, imap):\n\n imap[query.__class__.__name__.lower()] = query.value\n\n\[email protected]_applier(Time)\ndef _apply2(wlk, query, imap):\n imap['start_time'] = query.start\n imap['end_time'] = query.end\n\n\[email protected]_applier(Wavelength)\ndef _apply_wave(wlk, query, imap):\n if query.min != query.max:\n raise ValueError(\n \"For JSOC queries Wavelength.min must equal Wavelength.max\")\n\n imap[query.__class__.__name__.lower()] = query.min\n\n\[email protected]_creator(AttrOr)\ndef _create1(wlk, query):\n\n qblocks = []\n for iattr in query.attrs:\n qblocks.extend(wlk.create(iattr))\n\n return qblocks\n", "path": "sunpy/net/jsoc/attrs.py" } ]
diff --git a/.rtd-environment.yml b/.rtd-environment.yml deleted file mode 100644 index 7b58577039d..00000000000 --- a/.rtd-environment.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: sunpy-rtd -channels: - - conda-forge -dependencies: -- astropy>=2.0 -- beautifulsoup4 -- cairo -- contextlib2 -- cycler -- cython -- db -- decorator -- fontconfig -- freetype -- funcsigs -- jbig -- jpeg -- libgfortran -- libpng -- libtiff -- libxml2 -- libxslt -- lxml -- matplotlib -- mock -- networkx -- numpy -- openblas -- openssl -- pandas -- pbr -- pillow -- pip -- pixman -- py -- pycairo -- pyparsing -- pyqt -- pytest -- python=3.5 -- python-dateutil -- pytz -- qt -- readline -- requests -- scikit-image -- scipy -- setuptools -- sip -- six -- sqlalchemy -- sqlite -- suds-jurko -- sphinx=1.5.6 -- sphinx_rtd_theme -- tk -- wheel -- xz -- zlib -- openjpeg -- glymur -- pip: - - sunpy-sphinx-theme - - git+https://github.com/sphinx-gallery/sphinx-gallery - - sphinx-astropy diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9712745d2f2..0b1b669208f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,23 +5,83 @@ New Features ------------ - Added TimeUTime class to support utime. [#2409] +- Example for fine-grained use of ticks and grids [#2435] +- Maintiners Workflow Guide [#2411] +- Decorator to append and/or prepend doc strings [#2386] +- Adding `python setup.py test --figure-only` [#2557] API Changes ----------- -- ``sunpy.coordinates.representation`` has been removed. Longitude wrapping is - now done in the constructor of the frames. [#2431] -- Propagation of ``obstime`` in the coordinate frame transformation has changed, - this means in general when transforming directly between frames (not +- ``sunpy.coordinates.representation`` has been removed. Longitude wrapping is now done in the constructor of the frames. [#2431] +- Propagation of ``obstime`` in the coordinate frame transformation has changed, this means in general when transforming directly between frames (not ``SkyCoord``) you will have to specify ``obstime`` in more places. [#2461] -- Transforming between Heliographic Stonyhurst and Carrington now requires that - ``obstime`` be defined and the same on both the input and output frames. [#2461] +- Transforming between Heliographic Stonyhurst and Carrington now requires that ``obstime`` be defined and the same on both the input and output frames. 
[#2461] +- Removed the figure return from .peek() [#2487] Bug Fixes --------- +- Improve TimeSeriesBase docstring [#2399] +- Validate that pytest-doctestplus is installed [#2388] +- Fix use of self.wcs in plot in mapbase [#2398] +- Updated docstring with pointer to access EVE data for other levels [#2402] +- Fix broken links and redirections in documentation [#2403] +- Fixes Documentation changes due to NumPy 1.14 [#2404] +- Added docstrings to functions in dowload.py [#2415] +- Clean up database doc [#2414] +- rhessi.py now uses sunpy.io instead of astropy.io [#2416] +- Remove Gamma usage in Map [#2424] +- Changed requirements to python-dateutil [#2426] +- Clarify coordinate system definitions [#2429] +- Improve Map Peek when using draw_grid [#2442] +- Add HCC --> HGS test [#2443] +- Testing the transformation linking SunPy and Astropy against published values [#2454] +- Fixed title bug in sunpy.timeseries.rhessi [#2477] +- Allow LineAnimator to accept a varying x-axis [#2491] +- Indexing Bug Fix to LineAnimator [#2560] +- Output sphinx warnings to stdout [#2553] +- Docstring improvement for LineAnimator [#2514] +- move the egg_info builds to circleci [#2512] +- Added tests for TraceMap [#2504] - Fix HGS frame constructor and HPC ``calculate_distance`` with SkyCoord constructor. [#2463] +- removed `wavelnth` keyword in meta desc of Maps to avoid using non standard FITS keyword like `nan` [#2456] +- The documentation build now uses the Sphinx configuration from sphinx-astropy rather than from astropy-helpers.[#2494] +- Migrate to hypothesis.strategies.datetimes [#2368] +- Prevent a deprecation warning due to truth values of Quantity [#2358] + +0.8.5 +===== + +Bug Fixes +--------- + +- Removed AstropyDeprecationWarning from sunpy.coordinates.representation [#2476] +- Fix for NorthOffsetFrame under Astropy 3.0 [#2486] +- Fix lightcurve tests under numpy dev [#2505] +- Updated depecration link of radiospectra [#2481] +- Fixed Padding values in some of the documentation pages [#2497] +- Move documentation build to circleci [#2509] +- Fix Issue #2470 hgs_to_hcc(heliogcoord, heliocframe) [#2502] +- Fixing CompositeMap object so that it respects masked maps [#2492] + +0.8.4 +===== + +Bug Fixes +--------- + +- Improve detection of ``SkyCoord`` frame instantiation when distance is + `1*u.one`. This fixes a plotting bug with ``WCSAxes`` in Astropy 3.0 [#2465] - removed `wavelnth` keyword in meta desc of Maps to avoid using non standard FITS keyword like `nan` [#2427] +- Change the default units for HPC distance from `u.km` to `None`. [#2465] + +0.8.3 +===== + +Bug Fixes +--------- + - `~sunpy.net.dataretriever.clients.XRSClient` now reports time ranges of files correctly. 
[#2364] - Make parse_time work with datetime64s and pandas series [#2370] - CompositeMap axes scaling now uses map spatial units [#2310] @@ -37,13 +97,35 @@ Bug Fixes - Travis CI fix for numpy-dev build [#2340] - Updated masking brightest pixel example [#2338] - Changed TRAVIS cronjobs [#2338] -- Support array values for `obstime` for coordinates and transformations [#2342] +- Support array values for `obstime` for coordinates and transformations [#2342] [#2346] +- Updated Gallery off limb enhance example [#2337] +- Documentation fixes for VSO [#2354] [#2353] +- All tests within the documentation have been fixed [#2343] +- Change to using pytest-remotedata for our online tests [#2345] +- Fixed upstream astropy/numpy documentation issues [#2359] +- Documentation for Map improved [#2361] +- Fix the output units of pixel_to_world [#2362] +- Documentation for Database improved [#2355] +- Added test for mapsave [#2365] +- Documentation for Sun improved [#2369] + +0.8.2 +===== + +Bug Fixes +--------- + - Shows a warning if observation time is missing [#2293] - Updates MapCube to access the correct properties of the namedtuple SpatialPair [#2297] + +0.8.1 +====== + +Bug fixes +--------- + - Fixed TimeSeries test failures due to missing test files [#2273] - Refactored a GOES test to avoid a Py3.6 issue [#2276] -- The documentation build now uses the Sphinx configuration from sphinx-astropy - rather than from astropy-helpers.[#2494] 0.8.0 ====== diff --git a/CITATION.md b/CITATION.md deleted file mode 100644 index 2af79ca25f2..00000000000 --- a/CITATION.md +++ /dev/null @@ -1,42 +0,0 @@ -#Acknowledging or Citing SunPy - -###If you have used SunPy in your scientific work we would appreciate it if you would acknowledge it. -The continued growth and development of SunPy is dependent on the community being aware of the use SunPy. If you use SunPy, we therefore ask that you acknowledge SunPy appropriately in a publication, presentation, poster, or talk. - -* **For a publication**, we recommend the following line be added to the conclusion or acknowledgements: - - *This research has made use of SunPy, an open-source and free community-developed solar data analysis package written in Python (citation).* - - The citation is to the [SunPy v0.5 paper](https://iopscience.iop.org/article/10.1088/1749-4699/8/1/014009)/[arXiv(open access)](https://arxiv.org/abs/1505.02563). If the journal allows please also include a link to sunpy.org. If you have the time, please email us to let us know about your paper, as we maintain a [public list](https://www.zotero.org/groups/sunpy_-_python_for_solar_physicists) of papers on [Zotero](https://www.zotero.org/). - A BibTeX entry for LaTeX users is: - -``` - @ARTICLE{2015CS&D....8a4009S, - author = {{SunPy Community}, T. and {Mumford}, S.~J. and {Christe}, S. and - {P{\'e}rez-Su{\'a}rez}, D. and {Ireland}, J. and {Shih}, A.~Y. and - {Inglis}, A.~R. and {Liedtke}, S. and {Hewett}, R.~J. and {Mayer}, F. and - {Hughitt}, K. and {Freij}, N. and {Meszaros}, T. and {Bennett}, S.~M. and - {Malocha}, M. and {Evans}, J. and {Agrawal}, A. and {Leonard}, A.~J. and - {Robitaille}, T.~P. and {Mampaey}, B. and {Iv{\'a}n Campos-Rozo}, J. 
and - {Kirk}, M.~S.}, - title = "{SunPy{\mdash}Python for solar physics}", - journal = {Computational Science and Discovery}, - archivePrefix = "arXiv", - eprint = {1505.02563}, - primaryClass = "astro-ph.IM", - year = 2015, - month = jan, - volume = 8, - number = 1, - eid = {014009}, - pages = {014009}, - doi = {10.1088/1749-4699/8/1/014009}, - adsurl = {http://adsabs.harvard.edu/abs/2015CS%26D....8a4009S}, - adsnote = {Provided by the SAO/NASA Astrophysics Data System} - } - -``` - -* **For a poster, talks, or project websites**, please include the [Sunpy logo](http://sunpy.org/about/#acknowledging) on the title, conclusion slide, or about page. For websites please link the image to [sunpy.org](http://sunpy.org/). Other versions of the logo are available in the [sunpy-logo repository](https://github.com/sunpy/sunpy-logo/). - -Thank you, in advance, for your support. diff --git a/CITATION.rst b/CITATION.rst new file mode 100644 index 00000000000..c19c079ea77 --- /dev/null +++ b/CITATION.rst @@ -0,0 +1,60 @@ +Acknowledging or Citing SunPy +============================= + +If you have used SunPy in your scientific work we would appreciate it if you would acknowledge it. +The continued growth and development of SunPy is dependent on the community being aware of the use SunPy. +If you use SunPy, we therefore ask that you acknowledge SunPy appropriately in a publication, presentation, poster, or talk. + +- **For a publication**, we recommend the following line be added to + the conclusion or acknowledgements: + + *This research has made use of SunPy, an open-source and free + community-developed solar data analysis package written in Python + (citation).* + + The citation is to the `SunPy v0.5 paper`_/`arXiv(open access)`_. If + the journal allows please also include a link to sunpy.org. If you + have the time, please email us to let us know about your paper, as we + maintain a `public list`_ of papers on `Zotero`_. A BibTeX entry for + LaTeX users is: + +.. code:: bibtex + + @ARTICLE{2015CS&D....8a4009S, + author = {{SunPy Community}, T. and {Mumford}, S.~J. and {Christe}, S. and + {P{\'e}rez-Su{\'a}rez}, D. and {Ireland}, J. and {Shih}, A.~Y. and + {Inglis}, A.~R. and {Liedtke}, S. and {Hewett}, R.~J. and {Mayer}, F. and + {Hughitt}, K. and {Freij}, N. and {Meszaros}, T. and {Bennett}, S.~M. and + {Malocha}, M. and {Evans}, J. and {Agrawal}, A. and {Leonard}, A.~J. and + {Robitaille}, T.~P. and {Mampaey}, B. and {Iv{\'a}n Campos-Rozo}, J. and + {Kirk}, M.~S.}, + title = "{SunPy{\mdash}Python for solar physics}", + journal = {Computational Science and Discovery}, + archivePrefix = "arXiv", + eprint = {1505.02563}, + primaryClass = "astro-ph.IM", + year = 2015, + month = jan, + volume = 8, + number = 1, + eid = {014009}, + pages = {014009}, + doi = {10.1088/1749-4699/8/1/014009}, + adsurl = {http://adsabs.harvard.edu/abs/2015CS%26D....8a4009S}, + adsnote = {Provided by the SAO/NASA Astrophysics Data System} + } + +- **For a poster, talks, or project websites**, please include the + `Sunpy logo`_ on the title, conclusion slide, or about page. For + websites please link the image to `sunpy.org`_. Other versions of the + logo are available in the `sunpy-logo repository`_. + +Thank you, in advance, for your support. + +.. _SunPy v0.5 paper: https://iopscience.iop.org/article/10.1088/1749-4699/8/1/014009 +.. _arXiv(open access): https://arxiv.org/abs/1505.02563 +.. _public list: https://www.zotero.org/groups/sunpy_-_python_for_solar_physicists +.. _Zotero: https://www.zotero.org/ +.. 
_Sunpy logo: http://sunpy.org/about/#acknowledging +.. _sunpy.org: http://sunpy.org/ +.. _sunpy-logo repository: https://github.com/sunpy/sunpy-logo/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 327cd29204c..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,97 +0,0 @@ -How to Contribute to SunPy --------------------------------- - -Thanks for your interest in contributing to SunPy! - -SunPy is an open project that enourages everyone to contribute in any way possible. -Although this document mainly focuses on how to make contributions to SunPy code -and documentation, there are many other ways to get involved with the SunPy -community. A good place to start is by saying hello on -the [mailing list](https://groups.google.com/forum/#!forum/sunpy) -or [matrix](https://riot.im/app/#/room/#sunpy-general:matrix.org). - - -## Reporting Issues - -If you have found a problem when using SunPy the best way to report it and get -some help is by opening an [issue](https://github.com/sunpy/sunpy/issues) on our -GitHub issue tracker. - -You will need to sign in to [GitHub](github.com) to report an issue. If you are not -already a member of Github, you will have to join. Joining GitHub will also -make it easier to report and track issues in the future. - -If you do not want to join Github, then another way to report your issue is -email the SunPy developers -list -[[email protected]](https://groups.google.com/forum/#!forum/sunpy-dev). - - -Issue reports should provide a short description of the issue. If you are -reporting a bug, please post a code sample so others can attempt to reproduce -the error. Please also post any error output generated by the bug. For a good -example of how to do this see issue [#1191](https://github.com/sunpy/sunpy/issues/1191). - -If you are making a feature request, please post as much information as possible -regarding the feature you would like to see in SunPy. - - -## Finding Things to Contribute to - -If you want to contribute to SunPy but don't know where to start, we maintain a -list of issues that are good as a starting place, these can be found under the -[package-novice](https://github.com/sunpy/sunpy/issues?q=is%3Aissue+is%3Aopen+label%3Apackage-novice) -label. - - -## Making a Code or Documentation Contribution - -To make a contribution to the SunPy code or documentation you will need to use -git and GitHub, which are the tools SunPy uses to manage changes to the code and -documentation. - -1. Get a [GitHub Account](https://github.com/join) -1. [Fork](https://help.github.com/articles/fork-a-repo) SunPy on GitHub -1. Install the developer version of SunPy using git. -1. Make your changes and push to your fork. -1. Create a [pull request](https://help.github.com/articles/creating-a-pull-request-from-a-fork/). - -If any of the above steps are unfamiliar to you check out our -[Developers Guide](http://docs.sunpy.org/en/latest/dev_guide/index.html). (Suggesting -improvements to the developers guide is an excellent way to contribute!) - - -## Pull Request Review - -When you submit a pull request it can be reviewed and commented on by anyone. -Everyone's contributions are reviewed, and no-one's contributions are accepted -in to SunPy without being reviewed. We do this to try to maintain a consistent -coding style and high quality code. - -For the code to be accepted into SunPy it must meet the following criteria: - -* Conform to code quality standards, primarily [PEP8](http://legacy.python.org/dev/peps/pep-0008/). 
-* Have close to 100% unit test coverage (see [testing](http://docs.sunpy.org/en/latest/dev.html#testing)). -* Have API documentation generated by astropy's automodapi extenstion where appropriate. -* Contain an entry in the [CHANGELOG.md](https://github.com/sunpy/sunpy/blob/master/CHANGELOG.md) file. -* Be approved by at least two SunPy contributors. - -When a Pull Request is submitted to SunPy a set of automated checks are run to -ensure that there are adequate tests which pass and PEP 8 is adhered to. - - -## Pull Requests to non-master Branches - -From time to time, large feature development work may occur in branches other than master, and all -maintained releases currently have a branch i.e. `0.6`. -When making a PR to one of these branches (not master), please put the name of the branch in square brackets at -the beginning of the PR, i.e. `[0.6] My bug fix`. This makes it easier to filter and review these PRs on GitHub. - -## More Information - -For more information on contributing to SunPy check out the following: - -* Our [Developers Guide](http://docs.sunpy.org/en/latest/dev_guide/index.html). -* The [Developers Mailing List](https://groups.google.com/forum/#!forum/sunpy-dev) -* The [Pull Request Review](https://github.com/sunpy/sunpy/wiki/Pull-Request-Review-Procedure) wiki page. -* Our matrix channel [#sunpy-general:matrix.org](https://riot.im/app/#/room/#sunpy-general:matrix.org) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 00000000000..70ef954f6aa --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,87 @@ +How to Contribute to SunPy +-------------------------- + +Thanks for your interest in contributing to SunPy! + +SunPy is an open project that encourages everyone to contribute in any way possible. +Although this document mainly focuses on how to make contributions to SunPy code and documentation, there are many other ways to get involved with the SunPy community. +A good place to start is by saying hello in our `matrix`_ chat room or the `mailing list`_. + +This is a condensed version from our `Developers Guide`_. + +Reporting Issues +---------------- + +If you have found a problem when using SunPy the best way to report it and get some help is by opening an `issue`_ on our GitHub issue tracker. + +You will need to sign in to `GitHub`_ to report an issue. +If you are not already a member of Github, you will have to join. +Joining GitHub will also make it easier to report and track issues in the future. + +If you do not want to join Github, then another way to report your issue +is email the SunPy developers list `[email protected]`_. + +Issue reports should provide a short description of the issue. +If you are reporting a bug, please post a code sample so others can attempt to reproduce the error. +Please also post any error output generated by the bug. +For a good example of how to do this see issue `#1191`_. + +If you are making a feature request, please post as much information as possible regarding the feature you would like to see in SunPy. + +Finding Things to Contribute to +------------------------------- + +If you want to contribute to SunPy but don’t know where to start, we maintain a list of issues that are good as a starting place, these can be found under the `Package Novice`_ label. 
+ +Making a Code or Documentation Contribution +------------------------------------------- + +To make a contribution to the SunPy code or documentation you will need to use git and GitHub, which are the tools SunPy uses to manage changes to the code and documentation. + +1. Get a `GitHub Account`_ +2. `Fork`_ SunPy on GitHub +3. Install the developer version of SunPy using git. +4. Make your changes and push to your fork. +5. Create a `pull request`_. + +If any of the above steps are unfamiliar to you check out our `Developers Guide`_. +(Suggesting improvements to the developers guide is an excellent way to contribute!) + +Pull Request Review +------------------- + +When you submit a pull request it can be reviewed and commented on by anyone. +Everyone’s contributions are reviewed, and no-one’s contributions are accepted in to SunPy without being reviewed. +We do this to try to maintain a consistent coding style and high quality code. + +Pull Requests to non-master Branches +------------------------------------ + +From time to time, large feature development work may occur in branches other than master, and all maintained releases currently have a branch i.e. ``0.8``. +When making a PR to one of these branches (not master), please put the name of the branch in square brackets at the beginning of the PR, i.e. ``[0.8] My bug fix``. +This makes it easier to filter and review these PRs on GitHub. + +More Information +---------------- + +For more information on contributing to SunPy check out the following: + +- Our `Developers Guide`_. +- The `Developers Mailing List`_ +- The `Pull Request Review`_ process. +- Our matrix channel `#sunpy-general:matrix.org`_ + +.. _mailing list: https://groups.google.com/forum/#!forum/sunpy +.. _matrix: https://riot.im/app/#/room/#sunpy-general:matrix.org +.. _issue: https://github.com/sunpy/sunpy/issues +.. _GitHub: https://github.com +.. [email protected]: https://groups.google.com/forum/#!forum/sunpy-dev +.. _#1191: https://github.com/sunpy/sunpy/issues/1191 +.. _Package Novice: https://github.com/sunpy/sunpy/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Package+Novice%22 +.. _GitHub Account: https://github.com/join +.. _Fork: https://help.github.com/articles/fork-a-repo +.. _pull request: https://help.github.com/articles/creating-a-pull-request-from-a-fork/ +.. _Developers Guide: http://docs.sunpy.org/en/latest/dev_guide/index.html +.. _Developers Mailing List: https://groups.google.com/forum/#!forum/sunpy-dev +.. _Pull Request Review: http://docs.sunpy.org/en/latest/dev_guide/pr_review_procedure.html#review-process +.. 
_`#sunpy-general:matrix.org`: https://riot.im/app/#/room/#sunpy-general:matrix.org diff --git a/MANIFEST.in b/MANIFEST.in index b09f139ef5f..badca8db5b8 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,8 +3,8 @@ recursive-include sunpy *.h recursive-include sunpy *.c recursive-include sunpy/data * recursive-include sunpy *.txt -include README.md -include RELEASE.md +include README.rst +include RELEASE.rst include CHANGELOG.rst include licenses/*.rst include sunpy/data/sunpyrc diff --git a/README.md b/README.md deleted file mode 100644 index 06b5e90321e..00000000000 --- a/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# [SunPy](http://sunpy.org) -[![Latest Version](https://img.shields.io/pypi/v/sunpy.svg)](https://pypi.python.org/pypi/sunpy/) -[![Build Status](https://secure.travis-ci.org/sunpy/sunpy.svg)](http://travis-ci.org/sunpy/sunpy) -[![Build status](https://ci.appveyor.com/api/projects/status/xow461iejsjvp9vl?svg=true)](https://ci.appveyor.com/project/sunpy/sunpy) -[![codecov](https://codecov.io/gh/sunpy/sunpy/branch/master/graph/badge.svg)](https://codecov.io/gh/sunpy/sunpy) -[![Research software impact](http://depsy.org/api/package/pypi/sunpy/badge.svg)](http://depsy.org/package/python/sunpy) -[![DOI](https://zenodo.org/badge/2165383.svg)](https://zenodo.org/badge/latestdoi/2165383) -[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](http://numfocus.org) - -SunPy is an open-source Python library for solar physics data analysis. See [sunpy.org](http://sunpy.org) for more information about the project. - -For some examples of using SunPy see our [gallery](http://docs.sunpy.org/en/stable/generated/gallery/index.html). - - -Installation ------------- - -The recommended way to install SunPy is -with [conda](https://www.continuum.io/downloads). To install SunPy once conda is -installed run the following two commands: - - $ conda config --append channels conda-forge - $ conda install sunpy - - -If you want to develop SunPy you will need to install from git. The best way to -do this is to create a new conda environment and install the git version of -SunPy in it: - - $ conda config --append channels conda-forge - $ conda create -n sunpy-dev python sunpy hypothesis pytest-mock - $ source activate sunpy-dev - $ conda remove sunpy - $ git clone https://github.com/sunpy/sunpy.git sunpy-git - $ cd sunpy-git - $ pip install -e . - -For detailed installation instructions, see -the -[installation guide](http://docs.sunpy.org/en/latest/guide/installation/index.html) in -the SunPy docs. - -Usage ------ - -Here is a quick example of plotting an AIA image: - -```python ->>> import sunpy.map ->>> from sunpy.data.sample import AIA_171_IMAGE ->>> aia = sunpy.map.Map(AIA_171_IMAGE) ->>> aia.peek() -``` - -Getting Help ------------- - -For more information or to ask questions about SunPy, check out: - - * [SunPy Documentation](http://docs.sunpy.org/en/latest/) - * [SunPy Mailing List](https://groups.google.com/forum/#!forum/sunpy) - * [SunPy Matrix Channel](https://riot.im/app/#/room/#sunpy:matrix.org) - -Contributing ------------- - -[![Open Source Helpers](https://www.codetriage.com/sunpy/sunpy/badges/users.svg)](https://www.codetriage.com/sunpy/sunpy) - -If you would like to get involved, start by joining the -[SunPy mailing list](https://groups.google.com/forum/#!forum/sunpy) -and check out the [Developer's Guide](http://docs.sunpy.org/en/latest/dev_guide/index.html) section -of the SunPy docs. 
Stop by our chat room [#sunpy:matrix.org](https://riot.im/app/#/room/#sunpy:matrix.org) -if you have any questions. Help is always welcome so let us know what you like -to work on, or check out the [issues page](https://github.com/sunpy/sunpy/issues) -for the list of known outstanding items. - -For more information on contributing to SunPy, please read our -[contributing guide](https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.md). - -### Code of Conduct - -When you are interacting with the SunPy community you are asked to follow -our [Code of Conduct](https://github.com/sunpy/sunpy/wiki/Code-of-Conduct). diff --git a/README.rst b/README.rst new file mode 100644 index 00000000000..61a2f743707 --- /dev/null +++ b/README.rst @@ -0,0 +1,106 @@ +`SunPy`_ +======== + +|Latest Version| |Build Status| |Build status| |codecov| |Research software impact| |DOI| |Powered by NumFOCUS| + +SunPy is an open-source Python library for solar physics data analysis. +See `sunpy.org`_ for more information about the project. + +For some examples of using SunPy see our `gallery`_. + +Installation +------------ + +The recommended way to install SunPy is with `conda`_. +To install SunPy once conda is installed run the following two commands: + +.. code:: bash + + $ conda config --append channels conda-forge + $ conda install sunpy + +If you want to develop SunPy you will need to install from git. +The best way to do this is to create a new conda environment and install the git version of SunPy in it: + +.. code:: bash + + $ conda config --append channels conda-forge + $ conda create -n sunpy-dev python sunpy hypothesis pytest-mock + $ source activate sunpy-dev + $ conda remove sunpy + $ git clone https://github.com/sunpy/sunpy.git sunpy-git + $ cd sunpy-git + $ pip install -e . + +For detailed installation instructions, see the `installation guide`_ in the SunPy docs. + +Usage +----- + +Here is a quick example of plotting an AIA image: + +.. code:: python + + >>> import sunpy.map + >>> from sunpy.data.sample import AIA_171_IMAGE + >>> aia = sunpy.map.Map(AIA_171_IMAGE) + >>> aia.peek() + +Getting Help +------------ + +For more information or to ask questions about SunPy, check out: + +- `SunPy Documentation`_ +- `SunPy Matrix Channel`_ +- `SunPy Mailing List`_ + +Contributing +------------ + +|Open Source Helpers| + +If you would like to get involved, start by joining the `SunPy mailing list`_ and check out the `Developer’s Guide`_ section of the SunPy docs. +Stop by our chat room `#sunpy:matrix.org`_ if you have any questions. +Help is always welcome so let us know what you like to work on, or check out the `issues page`_ for the list of known outstanding items. + +For more information on contributing to SunPy, please read our `contributing guide`_ or the `Newcomers guide`_. + +Code of Conduct +~~~~~~~~~~~~~~~ + +When you are interacting with the SunPy community you are asked to +follow our `Code of Conduct`_. + +.. |Latest Version| image:: https://img.shields.io/pypi/v/sunpy.svg + :target: https://pypi.python.org/pypi/sunpy/ +.. |Build Status| image:: https://secure.travis-ci.org/sunpy/sunpy.svg + :target: http://travis-ci.org/sunpy/sunpy +.. |Build status| image:: https://ci.appveyor.com/api/projects/status/xow461iejsjvp9vl?svg=true + :target: https://ci.appveyor.com/project/sunpy/sunpy +.. |codecov| image:: https://codecov.io/gh/sunpy/sunpy/branch/master/graph/badge.svg + :target: https://codecov.io/gh/sunpy/sunpy +.. 
|Research software impact| image:: http://depsy.org/api/package/pypi/sunpy/badge.svg + :target: http://depsy.org/package/python/sunpy +.. |DOI| image:: https://zenodo.org/badge/2165383.svg + :target: https://zenodo.org/badge/latestdoi/2165383 +.. |Powered by NumFOCUS| image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A + :target: http://numfocus.org +.. |Open Source Helpers| image:: https://www.codetriage.com/sunpy/sunpy/badges/users.svg + :target: https://www.codetriage.com/sunpy/sunpy + +.. _SunPy: http://sunpy.org +.. _sunpy.org: http://sunpy.org +.. _gallery: http://docs.sunpy.org/en/stable/generated/gallery/index.html +.. _conda: https://www.continuum.io/downloads +.. _installation guide: http://docs.sunpy.org/en/latest/guide/installation/index.html +.. _SunPy Documentation: http://docs.sunpy.org/ +.. _SunPy Mailing List: https://groups.google.com/forum/#!forum/sunpy +.. _SunPy Matrix Channel: https://riot.im/app/#/room/#sunpy:matrix.org +.. _SunPy mailing list: https://groups.google.com/forum/#!forum/sunpy +.. _Developer’s Guide: http://docs.sunpy.org/en/latest/dev_guide/index.html +.. _`#sunpy:matrix.org`: https://riot.im/app/#/room/#sunpy:matrix.org +.. _issues page: https://github.com/sunpy/sunpy/issues +.. _contributing guide: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst +.. _Newcomers guide: http://docs.sunpy.org/en/stable/dev_guide/newcomers.html +.. _Code of Conduct: http://docs.sunpy.org/en/stable/coc.html diff --git a/RELEASE.md b/RELEASE.rst similarity index 50% rename from RELEASE.md rename to RELEASE.rst index eace750d027..4c76a5b4359 100644 --- a/RELEASE.md +++ b/RELEASE.rst @@ -1,19 +1,16 @@ The SunPy project is happy to announce the release of SunPy 0.8.0. -SunPy 0.8 is the next major release of SunPy and contains 1442 commits in over -200 pull requests, closing 163 issues from 35 people, 17 of whom have never -contributed before. +SunPy 0.8 is the next major release of SunPy and contains 1442 commits in over 200 pull requests, closing 163 issues from 35 people, 17 of whom have never contributed before. -This release is a large milestone for the SunPy library. It contains large new -features that have been in development for a number of years, and will form the -foundation of SunPy for the future. The main additions are `sunpy.net.Fido` and -`sunpy.timeseries`, as well as major upgrades to `sunpy.coordinates`. Along with -this it deprecates some old parts of SunPy that have been rewritten, two -submodules `sunpy.lightcurve` and `sunpy.wcs` have been superseded by -`sunpy.timeseries` and `sunpy.coordinates` respectively. +This release is a large milestone for the SunPy library. +It contains large new features that have been in development for a number of years, and will form the foundation of SunPy for the future. +The main additions are ``sunpy.net.Fido`` and ``sunpy.timeseries``, as well as major upgrades to ``sunpy.coordinates``. +Along with this it deprecates some old parts of SunPy that have been rewritten, two submodules ``sunpy.lightcurve`` and ``sunpy.wcs`` have been superseded by ``sunpy.timeseries`` and ``sunpy.coordinates`` respectively. The people who have contributed to the code for this release are: +:: + Stuart Mumford Jack Ireland Nabil Freij @@ -50,12 +47,8 @@ The people who have contributed to the code for this release are: Kaustubh Hiware * Larry Manley -Where an * indicates their first contribution. - +Where an \* indicates their first contribution. 
-In addition to the code contributions I would like to highlight the work of -Nabil Freij who has done a lot of work in the last few months releasing bug -fixes for the 0.7 series as well as a lot of behind the scenes work on SunPy's -tooling. I would also like to thank Duygu Keşkek who has redesigned the -sunpy.org page and the documentation as part of her GSOC project. Finally, I -would like to welcome Monica Bobra and Sabrina Savage to the SunPy Board. +In addition to the code contributions I would like to highlight the work of Nabil Freij who has done a lot of work in the last few months releasing bug fixes for the 0.7 series as well as a lot of behind the scenes work on SunPy’s tooling. +I would also like to thank Duygu Keşkek who has redesigned the sunpy.org page and the documentation as part of her GSOC project. +Finally, I would like to welcome Monica Bobra and Sabrina Savage to the SunPy Board. diff --git a/docs/dev_guide/newcomers.rst b/docs/dev_guide/newcomers.rst index 46484932b42..517ecbab1d6 100644 --- a/docs/dev_guide/newcomers.rst +++ b/docs/dev_guide/newcomers.rst @@ -96,6 +96,20 @@ If you get stuck or want help, just `ask here`_! .. _SunPy repository: https://github.com/sunpy/sunpy .. _ask here: https://riot.im/app/#/room/#sunpy-general:matrix.org +Astropy helpers +--------------- + +Within SunPy is a folder called `astropy_helpers` and this is a git submodule. +It can be common that this will be different to between branches or the main SunPy repository. +As a result, it can sometimes be added by accident to a commit you make. +Once this happens it can be difficult to remove it without using a `git rebase`. + +If you see that astropy_helpers is different or missing please run :: + + git submodule update --init + +which should fix the issue for you. + Send it back to us ------------------ @@ -107,7 +121,20 @@ To start you would need to commit the changes. git commit -a -m '<message>' Where you replace ``<message>`` with some text of the work you have done. -We strongly recommend having a read over `this guide`_ about how you write commit messages. +We strongly recommend having a read over `this guide about how you write commit messages <https://chris.beams.io/posts/git-commit/>`_. + +In addition to this, we support several custom tags you can add anywhere in the commit message. +Please use these tags extensively, especially for documentation PRs and WIP commits. + +* pep8speaks: Performs a PEP8 check on any submitted code. +* `CircleCi <https://circleci.com/gh/sunpy/sunpy/>`_: Tests to see if sunpy installs and builds the documentation. +* Giles: Returns a link if the documentation does build successfully. +* `Travis <https://travis-ci.org/sunpy/sunpy>`_: Runs our test suite to make sure it passes on Linux and mac OS. +* `AppVeyor <https://ci.appveyor.com/project/sunpy/sunpy>`_: Runs our test suite to make sure it passes on Windows. +* `CodeCov <https://codecov.io/gh/sunpy/sunpy/>`_: Checks how many lines of the code lack test coverage. + +We have auto-cancellation enabled on Appveyor, Travis and CircleCi for SunPy core. +This means that queued builds for commits are cancelled if there is a newer commit pushed to that given branch. Next step is to submit the changes back to SunPy. @@ -130,7 +157,6 @@ This can be sent to a contributor or attached in the Google group. Just remember, if you hit any problems get in touch! -.. _this guide: https://chris.beams.io/posts/git-commit/ .. _friendly guide: https://guides.github.com/activities/hello-world/ .. 
_Google Group: https://groups.google.com/forum/#!forum/sunpy .. _email a SunPy contributor: [email protected] diff --git a/docs/dev_guide/pr_review_procedure.rst b/docs/dev_guide/pr_review_procedure.rst index 021605827ce..82624c3f3da 100644 --- a/docs/dev_guide/pr_review_procedure.rst +++ b/docs/dev_guide/pr_review_procedure.rst @@ -26,6 +26,20 @@ Before the ‘merge’ button is clicked the following criteria must be met: It is important that approval for merging the PR is done on the comment thread, as this becomes part of the ‘permanent record’, this includes in during community meetings or in chat. +Continuous Integration +====================== + +Currently we have a variety of bots or services that respond or activate on an opened pull request. +While we try not to change them, they have undergone several changes with the aim of making them clear and focused on specific issues. + +* pep8speaks: Performs a PEP8 check on any submitted code. +* `CircleCi <https://circleci.com/gh/sunpy/sunpy/>`_: Tests to see if sunpy installs and builds the documentation. +* Giles: Returns a link if the documentation does build successfully. +* `Travis <https://travis-ci.org/sunpy/sunpy>`_: Runs our test suite to make sure it passes on Linux and mac OS. +* `AppVeyor <https://ci.appveyor.com/project/sunpy/sunpy>`_: Runs our test suite to make sure it passes on Windows. +* `CodeCov <https://codecov.io/gh/sunpy/sunpy/>`_: Checks how many lines of the code lack test coverage. + + SunPy GitHub Groups =================== diff --git a/docs/dev_guide/version_control.rst b/docs/dev_guide/version_control.rst index 15b8029b643..78c3a79759e 100644 --- a/docs/dev_guide/version_control.rst +++ b/docs/dev_guide/version_control.rst @@ -310,6 +310,66 @@ the resolution of the conflict with: :: You can then proceed to push this change up to your branch. +Rebasing +^^^^^^^^ + +Sometimes it might be better to instead of merging in upstream/master, to rebase on top of upstream/master, or if you would like to clean up your commit history if you deem it messy. +**However**, be warned that rebasing is a nuclear option. +If it goes wrong, it fundamentally changes your git history, there is no way back if you do not have a copy somewhere else, say a local branch for example. +You can also back out of a rebase during the process. + +We will have a brief example here but since rebasing is a major step (depending on the complexity of the pull request) we would recommend checking out one of these tutorials on the subject: `tutorial 1 <https://www.digitalocean.com/community/tutorials/how-to-rebase-and-update-a-pull-request>`_ and `tutorial 2 <https://www.atlassian.com/git/tutorials/rewriting-history/git-rebase>`_. + +With the above warning in mind, you can create a local copy of the branch you want to rebase :: + + git commit -m "My last messy commit" + git checkout -b MyCleanNewFeature + +and you still have your git history from `MyCleanNewFeature` in its branch + +If you are on your own branch and you have upstream added as a remote. +You can do :: + + git rebase upstream/master + +which will rebase your commits on top of upstream/master and if there are no major changes, it should complete with no problem. 
+If you add a `-i`, this will turn on interactive mode :: + + git rebase -i upstream/master + +you should see something like this :: + + pick 2231360 some old commit + pick g3s62dc some mid commit you want to remove + pick ee2adc2 Adds new feature + # Rebase 2cf755d..ee2adc2 onto 2cf755d (9 commands) + # + # Commands: + # p, pick = use commit + # r, reword = use commit, but edit the commit message + # e, edit = use commit, but stop for amending + # s, squash = use commit, but meld into previous commit + # f, fixup = like "squash", but discard this commit's log message + # x, exec = run command (the rest of the line) using shell + # d, drop = remove commit + +Here you can change `pick` to any of the other commands that are listed and have that change the commits in your local history. +So if you wanted to remove the middle commit you would change :: + + pick g3s62dc some mid commit you want to remove + +to :: + + drop g3s62dc some mid commit you want to remove + +or if you wanted to keep the changes merge that commit into the previous commit :: + + squash g3s62dc some mid commit you want to remove + +Now when you exit the screen, git will now apply the changes you are after. + +If any problem arises, git will tell you and allow you either work through the problem using `git mergetool` or to abort the process `git rebase --abort`. + Backporting contribution ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/index.rst b/docs/index.rst index 97fb65c428e..6304012539c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,6 +4,8 @@ SunPy Documentation Welcome to the SunPy documentation. SunPy is a community-developed, free and open-source solar data analysis environment for Python. +We have a documentation :any:`index <genindex>` and a :any:`module <modindex>` list. + .. toctree:: :maxdepth: 2 diff --git a/readthedocs.yml b/readthedocs.yml deleted file mode 100644 index 47fe6adf9bf..00000000000 --- a/readthedocs.yml +++ /dev/null @@ -1,5 +0,0 @@ -conda: - file: .rtd-environment.yml - -python: - setup_py_install: true diff --git a/sunpy/net/jsoc/attrs.py b/sunpy/net/jsoc/attrs.py index ce646fd0348..7f47bb9d874 100644 --- a/sunpy/net/jsoc/attrs.py +++ b/sunpy/net/jsoc/attrs.py @@ -12,8 +12,7 @@ class Series(_VSOSimpleAttr): """ The JSOC Series to Download. - See `this<http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_` - for a list of series'. + This is the list of `Series <http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_`. """ pass
liqd__a4-meinberlin-1970
District, topic and localisation for external projects and bplans. External projects also need district, topic and localisation, as the other projects do. The same is true for b-plans, as not all of them come via Imperia.
[ { "content": "import json\nimport logging\nimport urllib\n\nfrom background_task import background\n\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom meinberlin.apps.bplan.models import Bplan\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_features_from_bplan_api(endpoint):\n url = 'https://bplan-prod.liqd.net/api/' + endpoint\n req = urllib.request.Request(url)\n res = urllib.request.urlopen(req)\n res_body = res.read()\n res_json = json.loads(res_body.decode(\"utf-8\"))\n\n return res_json.get('features')\n\n\ndef get_bplan_point_and_district_pk(bplan_identifier):\n url_poi = 'bplan/points/' + \\\n '?bplan={}'.format(bplan_identifier.replace(' ', '%20'))\n\n try:\n features = get_features_from_bplan_api(url_poi)\n if features:\n district_pk = features[0]['properties']['bezirk']\n point = features[0]\n\n return point, district_pk\n\n return None, None\n\n except UnicodeEncodeError:\n # catches bplan-identifiers with problematic chars\n pass\n\n\ndef get_bplan_api_pk_to_a4_admin_district_dict():\n url_dis = 'bezirke/'\n features = get_features_from_bplan_api(url_dis)\n dis_dict = {}\n if features:\n for district in features:\n\n dis_model = AdministrativeDistrict.objects.filter(\n name=district['properties']['name']\n )\n if dis_model:\n dis_dict[district['properties']['pk']] = \\\n dis_model[0]\n else:\n dis_dict[district['properties']['pk']] = None\n\n return dis_dict\n\n\n@background(schedule=0)\ndef get_location_information(bplan_id):\n bplan = Bplan.objects.get(pk=bplan_id)\n point, district_pk = get_bplan_point_and_district_pk(bplan.identifier)\n dis_dict = get_bplan_api_pk_to_a4_admin_district_dict()\n\n if district_pk:\n bplan.administrative_district = \\\n dis_dict[district_pk]\n else:\n logger.error(\n \"The identifier '{}' for bplan '{}' seems to be wrong. \"\n \"It doesn't exist on https://bplan-prod.liqd.net/api/\"\n .format(bplan.identifier, bplan)\n )\n bplan.point = point\n bplan.save(update_fields=['point', 'administrative_district'])\n", "path": "meinberlin/apps/bplan/tasks.py" } ]
[ { "content": "import json\nimport logging\nimport urllib\n\nfrom background_task import background\n\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom meinberlin.apps.bplan.models import Bplan\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_features_from_bplan_api(endpoint):\n url = 'https://bplan-prod.liqd.net/api/' + endpoint\n req = urllib.request.Request(url)\n res = urllib.request.urlopen(req)\n res_body = res.read()\n res_json = json.loads(res_body.decode(\"utf-8\"))\n\n return res_json.get('features')\n\n\ndef get_bplan_point_and_district_pk(bplan_identifier):\n url_poi = 'bplan/points/' + \\\n '?bplan={}'.format(bplan_identifier.replace(' ', '%20'))\n\n try:\n features = get_features_from_bplan_api(url_poi)\n if features:\n district_pk = features[0]['properties']['bezirk']\n point = features[0]\n\n return point, district_pk\n\n return None, None\n\n except UnicodeEncodeError:\n # catches bplan-identifiers with problematic chars\n pass\n\n\ndef get_bplan_api_pk_to_a4_admin_district_dict():\n url_dis = 'bezirke/'\n features = get_features_from_bplan_api(url_dis)\n dis_dict = {}\n if features:\n for district in features:\n\n dis_model = AdministrativeDistrict.objects.filter(\n name=district['properties']['name']\n )\n if dis_model:\n dis_dict[district['properties']['pk']] = \\\n dis_model[0]\n else:\n dis_dict[district['properties']['pk']] = None\n\n return dis_dict\n\n\n@background(schedule=0)\ndef get_location_information(bplan_id):\n bplan = Bplan.objects.get(pk=bplan_id)\n point, district_pk = get_bplan_point_and_district_pk(bplan.identifier)\n dis_dict = get_bplan_api_pk_to_a4_admin_district_dict()\n\n if district_pk:\n bplan.administrative_district = \\\n dis_dict[district_pk]\n else:\n logger.error(\n \"The identifier '{}' for bplan '{}' seems to be wrong. \"\n \"It doesn't exist on https://bplan-prod.liqd.net/api/\"\n .format(bplan.identifier, bplan)\n )\n bplan.point = point\n bplan.topics = ['URB']\n bplan.save(update_fields=['point', 'administrative_district', 'topics'])\n", "path": "meinberlin/apps/bplan/tasks.py" } ]
diff --git a/meinberlin/apps/bplan/tasks.py b/meinberlin/apps/bplan/tasks.py index 509de8b14f..5599260cde 100644 --- a/meinberlin/apps/bplan/tasks.py +++ b/meinberlin/apps/bplan/tasks.py @@ -74,4 +74,5 @@ def get_location_information(bplan_id): .format(bplan.identifier, bplan) ) bplan.point = point - bplan.save(update_fields=['point', 'administrative_district']) + bplan.topics = ['URB'] + bplan.save(update_fields=['point', 'administrative_district', 'topics'])
cloud-custodian__cloud-custodian-8692
AWS user pool and identity pool resources.json has minimal information ### Describe the bug For the below custodian policy, the resources.json is created for each of the policy on successful execution. For user-pool and identity-pool, the resources.json file does not include full description/configuration of the resources, instead it contains very minimal information like ID, NAME, creation date etc. (as mentioned below) whereas for EC2 and Lambda, the populated resources.json has hundreds of metadata information. --- ##custodian.yaml policies: - name: cognito-checkauditmode resource: aws.user-pool - name: identity-checkauditmode resource: identity-pool - name: ec2-checkrunning resource: ec2 - name: find-all-lambdas resource: aws.lambda --- ##resources.json - cognito-checkauditmode [ { "Id": "xxxxxxxxxxxxxx", "Name": "xxxxxxxxxxxxxxxxxxx", "LambdaConfig": {}, "LastModifiedDate": "2023-06-29T08:56:18.028000-05:00", "CreationDate": "2023-06-29T08:56:17.860000-05:00", "Tags": [] }, { "Id": "xxxxxxxxxxxxxxxxxxx", "Name": "xxxxxxxxxxxxxxxxxxx", "LambdaConfig": {}, "LastModifiedDate": "2020-06-11T17:15:18.951000-05:00", "CreationDate": "2020-02-21T11:39:18.108000-06:00", "Tags": [] } ] --- ## resources.json - identity-checkauditmode [ { "IdentityPoolId": "xxxxxxxxxxxxxxxxxxx", "IdentityPoolName": "xxxxxxxxxxxxxxxxxxx", "Tags": [] } ] ### What did you expect to happen? Expecting a large json file with full configuration of the resource. Below is the AWS CLI command and the truncated response from CLI. Expecting a similar response. --- aws cognito-idp describe-user-pool --user-pool-id xxxxxxxxxxxxxxxxxxx --- truncated response { "UserPool": { "Id": "xxxxxxxxxxxxxxxxxxx", "Name": "xxxxxxxxxxxxxxxxxxx", "Policies": { "PasswordPolicy": { "MinimumLength": 8, "RequireUppercase": true, "RequireLowercase": true, "RequireNumbers": true, "RequireSymbols": true, "TemporaryPasswordValidityDays": 7 } }, "DeletionProtection": "INACTIVE", "LambdaConfig": {}, "LastModifiedDate": "2020-06-11T17:15:18.951000-05:00", "CreationDate": "2020-02-21T11:39:18.108000-06:00", "SchemaAttributes": [ { "Name": "sub", "AttributeDataType": "String", "DeveloperOnlyAttribute": false, "Mutable": false, "Required": true, "StringAttributeConstraints": { "MinLength": "1", "MaxLength": "2048" } }, ### Cloud Provider Amazon Web Services (AWS) ### Cloud Custodian version and dependency information ```shell Custodian: 0.9.27 Python: 3.11.4 (main, Jun 7 2023, 00:34:59) [Clang 14.0.3 (clang-1403.0.22.14.1)] Platform: posix.uname_result(sysname='Darwin', nodename='MABPWKJJ4T9RYW', release='22.5.0', version='Darwin Kernel Version 22.5.0: Thu Jun 8 22:22:23 PDT 2023; root:xnu-8796.121.3~7/RELEASE_ARM64_T6020', machine='arm64') Using venv: False Docker: False Installed: argcomplete==3.0.8 attrs==23.1.0 boto3==1.26.139 botocore==1.29.139 docutils==0.18.1 importlib-metadata==5.2.0 jmespath==1.0.1 jsonschema==4.17.3 pyrsistent==0.19.3 python-dateutil==2.8.2 pyyaml==6.0 s3transfer==0.6.1 six==1.16.0 tabulate==0.9.0 typing-extensions==4.6.3 urllib3==1.26.16 zipp==3.15.0 ``` ### Policy ```shell ##custodian.yaml policies: - name: cognito-checkauditmode resource: aws.user-pool - name: identity-checkauditmode resource: identity-pool - name: ec2-checkrunning resource: ec2 - name: find-all-lambdas resource: aws.lambda ``` ### Relevant log/traceback output ```shell 2023-06-26 20:09:45,838 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:1 time:0.00 2023-06-26 20:20:16,225 - custodian.policy - INFO - 
policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:1 time:0.70 2023-06-26 20:25:23,030 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00 2023-06-26 23:09:38,143 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.73 2023-06-26 23:13:37,202 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00 2023-06-26 23:17:02,042 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00 2023-06-26 23:18:59,196 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.00 2023-06-26 23:28:37,082 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.67 2023-06-27 09:11:53,373 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.67 2023-06-27 09:13:07,745 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00 2023-06-27 09:22:13,584 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.00 2023-06-27 09:22:42,984 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.65 2023-06-27 09:24:43,016 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:0 time:0.62 2023-06-27 09:27:15,604 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:1 time:0.64 2023-06-29 08:58:25,076 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:2 time:0.64 ``` ### Extra information or context Applied few additional filters and that as well failed. I believe, the filters will work only after the describe is successful
[ { "content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nfrom botocore.exceptions import ClientError\n\nfrom c7n.actions import BaseAction\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo, DescribeSource\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema\n\n\nclass DescribeIdentityPool(DescribeSource):\n def augment(self, resources):\n return universal_augment(self.manager, resources)\n\n\nclass DescribeUserPool(DescribeSource):\n def augment(self, resources):\n resources = super().augment(resources)\n return universal_augment(self.manager, resources)\n\n\[email protected]('identity-pool')\nclass CognitoIdentityPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cognito-identity'\n enum_spec = ('list_identity_pools', 'IdentityPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId', None)\n id = 'IdentityPoolId'\n name = 'IdentityPoolName'\n arn_type = \"identitypool\"\n cfn_type = 'AWS::Cognito::IdentityPool'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeIdentityPool,\n }\n\n\[email protected]_registry.register('delete')\nclass DeleteIdentityPool(BaseAction):\n \"\"\"Action to delete cognito identity pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: identity-pool-delete\n resource: identity-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-identity:DeleteIdentityPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-identity')\n try:\n client.delete_identity_pool(IdentityPoolId=pool['IdentityPoolId'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting identity pool:\\n %s\" % e)\n\n\[email protected]('user-pool')\nclass CognitoUserPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = \"cognito-idp\"\n enum_spec = ('list_user_pools', 'UserPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')\n id = 'Id'\n name = 'Name'\n arn_type = \"userpool\"\n cfn_type = 'AWS::Cognito::UserPool'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeUserPool,\n }\n\n\[email protected]_registry.register('delete')\nclass DeleteUserPool(BaseAction):\n \"\"\"Action to delete cognito user pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: user-pool-delete\n resource: user-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-idp:DeleteUserPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-idp')\n try:\n client.delete_user_pool(UserPoolId=pool['Id'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting user pool:\\n %s\" % e)\n", "path": "c7n/resources/cognito.py" } ]
[ { "content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nfrom botocore.exceptions import ClientError\n\nfrom c7n.actions import BaseAction\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo, DescribeSource\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema\n\n\nclass DescribeIdentityPool(DescribeSource):\n def augment(self, resources):\n resources = super().augment(resources)\n return universal_augment(self.manager, resources)\n\n\nclass DescribeUserPool(DescribeSource):\n def augment(self, resources):\n resources = super().augment(resources)\n return universal_augment(self.manager, resources)\n\n\[email protected]('identity-pool')\nclass CognitoIdentityPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cognito-identity'\n enum_spec = ('list_identity_pools', 'IdentityPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId', None)\n id = 'IdentityPoolId'\n name = 'IdentityPoolName'\n arn_type = \"identitypool\"\n cfn_type = 'AWS::Cognito::IdentityPool'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeIdentityPool,\n }\n\n\[email protected]_registry.register('delete')\nclass DeleteIdentityPool(BaseAction):\n \"\"\"Action to delete cognito identity pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: identity-pool-delete\n resource: identity-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-identity:DeleteIdentityPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-identity')\n try:\n client.delete_identity_pool(IdentityPoolId=pool['IdentityPoolId'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting identity pool:\\n %s\" % e)\n\n\[email protected]('user-pool')\nclass CognitoUserPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = \"cognito-idp\"\n enum_spec = ('list_user_pools', 'UserPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')\n id = 'Id'\n name = 'Name'\n arn_type = \"userpool\"\n cfn_type = 'AWS::Cognito::UserPool'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeUserPool,\n }\n\n\[email protected]_registry.register('delete')\nclass DeleteUserPool(BaseAction):\n \"\"\"Action to delete cognito user pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: user-pool-delete\n resource: user-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-idp:DeleteUserPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-idp')\n try:\n client.delete_user_pool(UserPoolId=pool['Id'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting user pool:\\n %s\" % e)\n", "path": "c7n/resources/cognito.py" } ]
diff --git a/c7n/resources/cognito.py b/c7n/resources/cognito.py index 05351adaa45..6bfa7853826 100644 --- a/c7n/resources/cognito.py +++ b/c7n/resources/cognito.py @@ -11,6 +11,7 @@ class DescribeIdentityPool(DescribeSource): def augment(self, resources): + resources = super().augment(resources) return universal_augment(self.manager, resources) diff --git a/tests/test_cognito.py b/tests/test_cognito.py index bacc05b7ba5..ccc54afd65b 100644 --- a/tests/test_cognito.py +++ b/tests/test_cognito.py @@ -51,6 +51,12 @@ def test_query_identity_pool(self): sorted([n["IdentityPoolName"] for n in resources]), ["origin_MOBILEHUB_1667653900", "test_delete_id_pool"], ) + # Confirm that our augment pass has tag information and detail + # from describe_identity_pool + self.assertLessEqual( + {"IdentityPoolId", "Tags", "CognitoIdentityProviders"}, + set(resources[0]) + ) def test_delete_identity_pool(self): factory = self.replay_flight_data("test_cognito-identity-pool_delete")
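The one-line fix above re-chains `DescribeIdentityPool.augment` through the base describe source before applying the universal tag augment. A minimal, self-contained sketch of that pattern (simplified stand-ins, not the real Cloud Custodian classes) shows why the ordering matters: skipping `super().augment()` drops the per-resource detail that the base augment contributes.

```python
# Simplified stand-ins for the describe-source augment chain; the real
# c7n classes are more involved, but the ordering issue is the same.
class BaseDescribe:
    def augment(self, resources):
        # Base augment: fills in per-resource detail (detail_spec results).
        return [dict(r, CognitoIdentityProviders=[]) for r in resources]


def universal_augment(resources):
    # Stand-in for the tag augment: attaches a Tags list to each resource.
    return [dict(r, Tags=[]) for r in resources]


class DescribeIdentityPool(BaseDescribe):
    def augment(self, resources):
        resources = super().augment(resources)  # the line the fix adds
        return universal_augment(resources)


pools = DescribeIdentityPool().augment([{"IdentityPoolId": "pool-1"}])
assert {"IdentityPoolId", "CognitoIdentityProviders", "Tags"} <= set(pools[0])
```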
graphql-python__graphene-django-1105
This will break tests (or lead to tests that do not work) if your endpoint is not '/graphql':

https://github.com/graphql-python/graphene-django/blob/b66a3f347947804d0ab7d9763309e2977b5bcd5a/graphene_django/utils/testing.py#L12
[ { "content": "\"\"\"\nSettings for Graphene are all namespaced in the GRAPHENE setting.\nFor example your project's `settings.py` file might look like this:\nGRAPHENE = {\n 'SCHEMA': 'my_app.schema.schema'\n 'MIDDLEWARE': (\n 'graphene_django.debug.DjangoDebugMiddleware',\n )\n}\nThis module provides the `graphene_settings` object, that is used to access\nGraphene settings, checking for user settings first, then falling\nback to the defaults.\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.test.signals import setting_changed\n\nimport importlib # Available in Python 3.1+\n\n\n# Copied shamelessly from Django REST Framework\n\nDEFAULTS = {\n \"SCHEMA\": None,\n \"SCHEMA_OUTPUT\": \"schema.json\",\n \"SCHEMA_INDENT\": 2,\n \"MIDDLEWARE\": (),\n # Set to True if the connection fields must have\n # either the first or last argument\n \"RELAY_CONNECTION_ENFORCE_FIRST_OR_LAST\": False,\n # Max items returned in ConnectionFields / FilterConnectionFields\n \"RELAY_CONNECTION_MAX_LIMIT\": 100,\n \"CAMELCASE_ERRORS\": True,\n # Set to True to enable v2 naming convention for choice field Enum's\n \"DJANGO_CHOICE_FIELD_ENUM_V2_NAMING\": False,\n \"DJANGO_CHOICE_FIELD_ENUM_CUSTOM_NAME\": None,\n # Use a separate path for handling subscriptions.\n \"SUBSCRIPTION_PATH\": None,\n # By default GraphiQL headers editor tab is enabled, set to False to hide it\n # This sets headerEditorEnabled GraphiQL option, for details go to\n # https://github.com/graphql/graphiql/tree/main/packages/graphiql#options\n \"GRAPHIQL_HEADER_EDITOR_ENABLED\": True,\n \"GRAPHIQL_SHOULD_PERSIST_HEADERS\": False,\n \"ATOMIC_MUTATIONS\": False,\n}\n\nif settings.DEBUG:\n DEFAULTS[\"MIDDLEWARE\"] += (\"graphene_django.debug.DjangoDebugMiddleware\",)\n\n# List of settings that may be in string import notation.\nIMPORT_STRINGS = (\"MIDDLEWARE\", \"SCHEMA\")\n\n\ndef perform_import(val, setting_name):\n \"\"\"\n If the given setting is a string import notation,\n then perform the necessary import or imports.\n \"\"\"\n if val is None:\n return None\n elif isinstance(val, str):\n return import_from_string(val, setting_name)\n elif isinstance(val, (list, tuple)):\n return [import_from_string(item, setting_name) for item in val]\n return val\n\n\ndef import_from_string(val, setting_name):\n \"\"\"\n Attempt to import a class from a string representation.\n \"\"\"\n try:\n # Nod to tastypie's use of importlib.\n parts = val.split(\".\")\n module_path, class_name = \".\".join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as e:\n msg = \"Could not import '%s' for Graphene setting '%s'. 
%s: %s.\" % (\n val,\n setting_name,\n e.__class__.__name__,\n e,\n )\n raise ImportError(msg)\n\n\nclass GrapheneSettings(object):\n \"\"\"\n A settings object, that allows API settings to be accessed as properties.\n For example:\n from graphene_django.settings import settings\n print(settings.SCHEMA)\n Any setting with string import paths will be automatically resolved\n and return the class, rather than the string literal.\n \"\"\"\n\n def __init__(self, user_settings=None, defaults=None, import_strings=None):\n if user_settings:\n self._user_settings = user_settings\n self.defaults = defaults or DEFAULTS\n self.import_strings = import_strings or IMPORT_STRINGS\n\n @property\n def user_settings(self):\n if not hasattr(self, \"_user_settings\"):\n self._user_settings = getattr(settings, \"GRAPHENE\", {})\n return self._user_settings\n\n def __getattr__(self, attr):\n if attr not in self.defaults:\n raise AttributeError(\"Invalid Graphene setting: '%s'\" % attr)\n\n try:\n # Check if present in user settings\n val = self.user_settings[attr]\n except KeyError:\n # Fall back to defaults\n val = self.defaults[attr]\n\n # Coerce import strings into classes\n if attr in self.import_strings:\n val = perform_import(val, attr)\n\n # Cache the result\n setattr(self, attr, val)\n return val\n\n\ngraphene_settings = GrapheneSettings(None, DEFAULTS, IMPORT_STRINGS)\n\n\ndef reload_graphene_settings(*args, **kwargs):\n global graphene_settings\n setting, value = kwargs[\"setting\"], kwargs[\"value\"]\n if setting == \"GRAPHENE\":\n graphene_settings = GrapheneSettings(value, DEFAULTS, IMPORT_STRINGS)\n\n\nsetting_changed.connect(reload_graphene_settings)\n", "path": "graphene_django/settings.py" } ]
[ { "content": "\"\"\"\nSettings for Graphene are all namespaced in the GRAPHENE setting.\nFor example your project's `settings.py` file might look like this:\nGRAPHENE = {\n 'SCHEMA': 'my_app.schema.schema'\n 'MIDDLEWARE': (\n 'graphene_django.debug.DjangoDebugMiddleware',\n )\n}\nThis module provides the `graphene_settings` object, that is used to access\nGraphene settings, checking for user settings first, then falling\nback to the defaults.\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.test.signals import setting_changed\n\nimport importlib # Available in Python 3.1+\n\n\n# Copied shamelessly from Django REST Framework\n\nDEFAULTS = {\n \"SCHEMA\": None,\n \"SCHEMA_OUTPUT\": \"schema.json\",\n \"SCHEMA_INDENT\": 2,\n \"MIDDLEWARE\": (),\n # Set to True if the connection fields must have\n # either the first or last argument\n \"RELAY_CONNECTION_ENFORCE_FIRST_OR_LAST\": False,\n # Max items returned in ConnectionFields / FilterConnectionFields\n \"RELAY_CONNECTION_MAX_LIMIT\": 100,\n \"CAMELCASE_ERRORS\": True,\n # Set to True to enable v2 naming convention for choice field Enum's\n \"DJANGO_CHOICE_FIELD_ENUM_V2_NAMING\": False,\n \"DJANGO_CHOICE_FIELD_ENUM_CUSTOM_NAME\": None,\n # Use a separate path for handling subscriptions.\n \"SUBSCRIPTION_PATH\": None,\n # By default GraphiQL headers editor tab is enabled, set to False to hide it\n # This sets headerEditorEnabled GraphiQL option, for details go to\n # https://github.com/graphql/graphiql/tree/main/packages/graphiql#options\n \"GRAPHIQL_HEADER_EDITOR_ENABLED\": True,\n \"GRAPHIQL_SHOULD_PERSIST_HEADERS\": False,\n \"ATOMIC_MUTATIONS\": False,\n \"TESTING_ENDPOINT\": \"/graphql\",\n}\n\nif settings.DEBUG:\n DEFAULTS[\"MIDDLEWARE\"] += (\"graphene_django.debug.DjangoDebugMiddleware\",)\n\n# List of settings that may be in string import notation.\nIMPORT_STRINGS = (\"MIDDLEWARE\", \"SCHEMA\")\n\n\ndef perform_import(val, setting_name):\n \"\"\"\n If the given setting is a string import notation,\n then perform the necessary import or imports.\n \"\"\"\n if val is None:\n return None\n elif isinstance(val, str):\n return import_from_string(val, setting_name)\n elif isinstance(val, (list, tuple)):\n return [import_from_string(item, setting_name) for item in val]\n return val\n\n\ndef import_from_string(val, setting_name):\n \"\"\"\n Attempt to import a class from a string representation.\n \"\"\"\n try:\n # Nod to tastypie's use of importlib.\n parts = val.split(\".\")\n module_path, class_name = \".\".join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as e:\n msg = \"Could not import '%s' for Graphene setting '%s'. 
%s: %s.\" % (\n val,\n setting_name,\n e.__class__.__name__,\n e,\n )\n raise ImportError(msg)\n\n\nclass GrapheneSettings(object):\n \"\"\"\n A settings object, that allows API settings to be accessed as properties.\n For example:\n from graphene_django.settings import settings\n print(settings.SCHEMA)\n Any setting with string import paths will be automatically resolved\n and return the class, rather than the string literal.\n \"\"\"\n\n def __init__(self, user_settings=None, defaults=None, import_strings=None):\n if user_settings:\n self._user_settings = user_settings\n self.defaults = defaults or DEFAULTS\n self.import_strings = import_strings or IMPORT_STRINGS\n\n @property\n def user_settings(self):\n if not hasattr(self, \"_user_settings\"):\n self._user_settings = getattr(settings, \"GRAPHENE\", {})\n return self._user_settings\n\n def __getattr__(self, attr):\n if attr not in self.defaults:\n raise AttributeError(\"Invalid Graphene setting: '%s'\" % attr)\n\n try:\n # Check if present in user settings\n val = self.user_settings[attr]\n except KeyError:\n # Fall back to defaults\n val = self.defaults[attr]\n\n # Coerce import strings into classes\n if attr in self.import_strings:\n val = perform_import(val, attr)\n\n # Cache the result\n setattr(self, attr, val)\n return val\n\n\ngraphene_settings = GrapheneSettings(None, DEFAULTS, IMPORT_STRINGS)\n\n\ndef reload_graphene_settings(*args, **kwargs):\n global graphene_settings\n setting, value = kwargs[\"setting\"], kwargs[\"value\"]\n if setting == \"GRAPHENE\":\n graphene_settings = GrapheneSettings(value, DEFAULTS, IMPORT_STRINGS)\n\n\nsetting_changed.connect(reload_graphene_settings)\n", "path": "graphene_django/settings.py" } ]
diff --git a/docs/settings.rst b/docs/settings.rst index 1984a154c..5bffd08f9 100644 --- a/docs/settings.rst +++ b/docs/settings.rst @@ -189,7 +189,7 @@ Default: ``None`` ``GRAPHIQL_HEADER_EDITOR_ENABLED`` ---------------------- +---------------------------------- GraphiQL starting from version 1.0.0 allows setting custom headers in similar fashion to query variables. @@ -209,6 +209,20 @@ Default: ``True`` } +``TESTING_ENDPOINT`` +-------------------- + +Define the graphql endpoint url used for the `GraphQLTestCase` class. + +Default: ``/graphql`` + +.. code:: python + + GRAPHENE = { + 'TESTING_ENDPOINT': '/customEndpoint' + } + + ``GRAPHIQL_SHOULD_PERSIST_HEADERS`` --------------------- diff --git a/docs/testing.rst b/docs/testing.rst index fb0a85dc4..1b3235218 100644 --- a/docs/testing.rst +++ b/docs/testing.rst @@ -6,7 +6,8 @@ Using unittest If you want to unittest your API calls derive your test case from the class `GraphQLTestCase`. -Your endpoint is set through the `GRAPHQL_URL` attribute on `GraphQLTestCase`. The default endpoint is `GRAPHQL_URL = "/graphql/"`. +The default endpoint for testing is `/graphql`. You can override this in the `settings <https://docs.graphene-python.org/projects/django/en/latest/settings/#testing-endpoint>`__. + Usage: diff --git a/graphene_django/settings.py b/graphene_django/settings.py index 0fd70a721..6f6232687 100644 --- a/graphene_django/settings.py +++ b/graphene_django/settings.py @@ -43,6 +43,7 @@ "GRAPHIQL_HEADER_EDITOR_ENABLED": True, "GRAPHIQL_SHOULD_PERSIST_HEADERS": False, "ATOMIC_MUTATIONS": False, + "TESTING_ENDPOINT": "/graphql", } if settings.DEBUG: diff --git a/graphene_django/utils/testing.py b/graphene_django/utils/testing.py index f94c38574..ca0d18506 100644 --- a/graphene_django/utils/testing.py +++ b/graphene_django/utils/testing.py @@ -3,6 +3,8 @@ from django.test import Client, TestCase, TransactionTestCase +from graphene_django.settings import graphene_settings + DEFAULT_GRAPHQL_URL = "/graphql" @@ -40,7 +42,7 @@ def graphql_query( if client is None: client = Client() if not graphql_url: - graphql_url = DEFAULT_GRAPHQL_URL + graphql_url = graphene_settings.TESTING_ENDPOINT body = {"query": query} if operation_name: @@ -69,7 +71,7 @@ class GraphQLTestMixin(object): """ # URL to graphql endpoint - GRAPHQL_URL = DEFAULT_GRAPHQL_URL + GRAPHQL_URL = graphene_settings.TESTING_ENDPOINT def query( self, query, operation_name=None, input_data=None, variables=None, headers=None diff --git a/graphene_django/utils/tests/test_testing.py b/graphene_django/utils/tests/test_testing.py index 2ef78f99b..de5615859 100644 --- a/graphene_django/utils/tests/test_testing.py +++ b/graphene_django/utils/tests/test_testing.py @@ -2,6 +2,7 @@ from .. import GraphQLTestCase from ...tests.test_types import with_local_registry +from ...settings import graphene_settings from django.test import Client @@ -43,3 +44,11 @@ def runTest(self): with pytest.warns(PendingDeprecationWarning): tc._client = Client() + + +def test_graphql_test_case_imports_endpoint(): + """ + GraphQLTestCase class should import the default endpoint from settings file + """ + + assert GraphQLTestCase.GRAPHQL_URL == graphene_settings.TESTING_ENDPOINT
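With the change above, the testing endpoint can be configured through `GRAPHENE["TESTING_ENDPOINT"]`; before it, the documented escape hatch is the `GRAPHQL_URL` attribute on `GraphQLTestCase`. A rough sketch of a test against a non-default endpoint (the URL and query are placeholders, not part of the original report):

```python
from graphene_django.utils.testing import GraphQLTestCase


class CustomEndpointQueryTests(GraphQLTestCase):
    # Point the test client at the project's actual endpoint instead of
    # the hard-coded "/graphql" default.
    GRAPHQL_URL = "/api/graphql"

    def test_list_query(self):
        response = self.query(
            """
            query {
                allThings {
                    id
                }
            }
            """
        )
        self.assertResponseNoErrors(response)
```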
searxng__searxng-437
Bug: microsoft academic engine

**Version of SearXNG**

Repository: https://github.com/tiekoetter/searxng
Branch: master
Version: 1.0.0-972-93548243

**Technical report**

Error
* Error: httpx.TimeoutException
* Percentage: 50
* Parameters: `(None, None, None)`
* File name: `searx/search/processors/online.py:97`
* Function: `_send_http_request`
* Code: `response = req(params['url'], **request_args)`
[ { "content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"\n Microsoft Academic (Science)\n\"\"\"\n\nfrom json import dumps, loads\nfrom searx.utils import html_to_text\n\n# about\nabout = {\n \"website\": 'https://academic.microsoft.com',\n \"wikidata_id\": 'Q28136779',\n \"official_api_documentation\": 'http://ma-graph.org/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\ncategories = ['images']\npaging = True\nsearch_url = 'https://academic.microsoft.com/api/search'\n_paper_url = 'https://academic.microsoft.com/paper/{id}/reference'\n\n\ndef request(query, params):\n params['url'] = search_url\n params['method'] = 'POST'\n params['headers']['content-type'] = 'application/json; charset=utf-8'\n params['data'] = dumps({\n 'query': query,\n 'queryExpression': '',\n 'filters': [],\n 'orderBy': 0,\n 'skip': (params['pageno'] - 1) * 10,\n 'sortAscending': True,\n 'take': 10,\n 'includeCitationContexts': False,\n 'profileId': '',\n })\n\n return params\n\n\ndef response(resp):\n results = []\n response_data = loads(resp.text)\n if not response_data:\n return results\n\n for result in response_data['pr']:\n if 'dn' not in result['paper']:\n continue\n\n title = result['paper']['dn']\n content = _get_content(result['paper'])\n url = _paper_url.format(id=result['paper']['id'])\n results.append({\n 'url': url,\n 'title': html_to_text(title),\n 'content': html_to_text(content),\n })\n\n return results\n\n\ndef _get_content(result):\n if 'd' in result:\n content = result['d']\n if len(content) > 300:\n return content[:300] + '...'\n return content\n\n return ''\n", "path": "searx/engines/microsoft_academic.py" } ]
[ { "content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"\n Microsoft Academic (Science)\n\"\"\"\n\nfrom json import dumps, loads\nfrom searx.utils import html_to_text\n\n# about\nabout = {\n \"website\": 'https://academic.microsoft.com',\n \"wikidata_id\": 'Q28136779',\n \"official_api_documentation\": 'http://ma-graph.org/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\ncategories = ['images']\npaging = True\nsearch_url = 'https://academic.microsoft.com/api/search'\n_paper_url = 'https://academic.microsoft.com/paper/{id}/reference'\n\n\ndef request(query, params):\n params['url'] = search_url\n params['method'] = 'POST'\n params['headers']['content-type'] = 'application/json; charset=utf-8'\n params['data'] = dumps({\n 'query': query,\n 'queryExpression': '',\n 'filters': [],\n 'orderBy': 0,\n 'skip': (params['pageno'] - 1) * 10,\n 'sortAscending': True,\n 'take': 10,\n 'includeCitationContexts': False,\n 'profileId': '',\n })\n\n return params\n\n\ndef response(resp):\n results = []\n response_data = loads(resp.text)\n if not response_data:\n return results\n\n for result in response_data.get('pr', {}):\n if 'dn' not in result['paper']:\n continue\n\n title = result['paper']['dn']\n content = _get_content(result['paper'])\n url = _paper_url.format(id=result['paper']['id'])\n results.append({\n 'url': url,\n 'title': html_to_text(title),\n 'content': html_to_text(content),\n })\n\n return results\n\n\ndef _get_content(result):\n if 'd' in result:\n content = result['d']\n if len(content) > 300:\n return content[:300] + '...'\n return content\n\n return ''\n", "path": "searx/engines/microsoft_academic.py" } ]
diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py index 82a5d35502f..c9961104991 100644 --- a/searx/engines/microsoft_academic.py +++ b/searx/engines/microsoft_academic.py @@ -47,7 +47,7 @@ def response(resp): if not response_data: return results - for result in response_data['pr']: + for result in response_data.get('pr', {}): if 'dn' not in result['paper']: continue diff --git a/searx/settings.yml b/searx/settings.yml index 514eb865e90..f53e3257b50 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -873,6 +873,7 @@ engines: engine: microsoft_academic categories: science shortcut: ma + timeout: 6.0 - name: mixcloud engine: mixcloud
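The patch has two parts: a longer per-engine timeout in `settings.yml`, and a defensive `.get()` so a payload without the `pr` key yields no results instead of raising. A toy illustration of the parsing guard (the payloads here are made up):

```python
from json import loads


def titles_from_response(text):
    response_data = loads(text)
    titles = []
    for result in response_data.get('pr', {}):  # was response_data['pr']
        if 'dn' in result['paper']:
            titles.append(result['paper']['dn'])
    return titles


print(titles_from_response('{}'))  # [] -- no KeyError on an empty payload
print(titles_from_response('{"pr": [{"paper": {"dn": "Some title", "id": 1}}]}'))
```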
pyca__cryptography-4037
Bug in HKDF?

I think the computation of [`max_length`](https://github.com/pyca/cryptography/blob/66460d8f62b3f27a009bb61be6ce7675c8451b6e/src/cryptography/hazmat/primitives/kdf/hkdf.py#L70) in `src/cryptography/hazmat/primitives/kdf/hkdf.py` is wrong.

[RFC5869](https://tools.ietf.org/html/rfc5869) states on page 3 that the input `L` of the HKDF-Expand function describes the "length of output keying material in octets (<= 255*HashLen)". An octet consists of 8 bits. Currently, `max_length` is computed as:

```
max_length = 255 * (algorithm.digest_size // 8)
```

The problem is that `algorithm.digest_size` already returns the size of the digest in bytes (there are 8 bits per byte). Therefore, the division by 8 is wrong, and `max_length` is unnecessarily small.

(The same applies to the computation of `salt` ([line 33](https://github.com/pyca/cryptography/blob/66460d8f62b3f27a009bb61be6ce7675c8451b6e/src/cryptography/hazmat/primitives/kdf/hkdf.py#L33)) in the case where `salt is None`.)
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HMACBackend\nfrom cryptography.hazmat.primitives import constant_time, hmac\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDF(object):\n def __init__(self, algorithm, length, salt, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n if not (salt is None or isinstance(salt, bytes)):\n raise TypeError(\"salt must be bytes.\")\n\n if salt is None:\n salt = b\"\\x00\" * self._algorithm.digest_size\n\n self._salt = salt\n\n self._backend = backend\n\n self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)\n\n def _extract(self, key_material):\n h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)\n h.update(key_material)\n return h.finalize()\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n return self._hkdf_expand.derive(self._extract(key_material))\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDFExpand(object):\n def __init__(self, algorithm, length, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n self._backend = backend\n\n max_length = 255 * (algorithm.digest_size // 8)\n\n if length > max_length:\n raise ValueError(\n \"Can not derive keys larger than {0} octets.\".format(\n max_length\n ))\n\n self._length = length\n\n if not (info is None or isinstance(info, bytes)):\n raise TypeError(\"info must be bytes.\")\n\n if info is None:\n info = b\"\"\n\n self._info = info\n\n self._used = False\n\n def _expand(self, key_material):\n output = [b\"\"]\n counter = 1\n\n while self._algorithm.digest_size * (len(output) - 1) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n h.update(six.int2byte(counter))\n output.append(h.finalize())\n counter += 1\n\n return b\"\".join(output)[:self._length]\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n if self._used:\n raise AlreadyFinalized\n\n self._used = True\n return self._expand(key_material)\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n", "path": "src/cryptography/hazmat/primitives/kdf/hkdf.py" } ]
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HMACBackend\nfrom cryptography.hazmat.primitives import constant_time, hmac\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDF(object):\n def __init__(self, algorithm, length, salt, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n if not (salt is None or isinstance(salt, bytes)):\n raise TypeError(\"salt must be bytes.\")\n\n if salt is None:\n salt = b\"\\x00\" * self._algorithm.digest_size\n\n self._salt = salt\n\n self._backend = backend\n\n self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)\n\n def _extract(self, key_material):\n h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)\n h.update(key_material)\n return h.finalize()\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n return self._hkdf_expand.derive(self._extract(key_material))\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDFExpand(object):\n def __init__(self, algorithm, length, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n self._backend = backend\n\n max_length = 255 * algorithm.digest_size\n\n if length > max_length:\n raise ValueError(\n \"Can not derive keys larger than {0} octets.\".format(\n max_length\n ))\n\n self._length = length\n\n if not (info is None or isinstance(info, bytes)):\n raise TypeError(\"info must be bytes.\")\n\n if info is None:\n info = b\"\"\n\n self._info = info\n\n self._used = False\n\n def _expand(self, key_material):\n output = [b\"\"]\n counter = 1\n\n while self._algorithm.digest_size * (len(output) - 1) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n h.update(six.int2byte(counter))\n output.append(h.finalize())\n counter += 1\n\n return b\"\".join(output)[:self._length]\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n if self._used:\n raise AlreadyFinalized\n\n self._used = True\n return self._expand(key_material)\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n", "path": "src/cryptography/hazmat/primitives/kdf/hkdf.py" } ]
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 5a256a2531a1..5e0c0eb5a412 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Changelog .. note:: This version is not yet released and is under active development. * **BACKWARDS INCOMPATIBLE:** Support for Python 2.6 has been dropped. +* Resolved a bug in ``HKDF`` that incorrectly constrained output size. * Added token rotation support to :doc:`Fernet </fernet>` with :meth:`~cryptography.fernet.MultiFernet.rotate`. diff --git a/src/cryptography/hazmat/primitives/kdf/hkdf.py b/src/cryptography/hazmat/primitives/kdf/hkdf.py index 964ac2cccd56..917b4e9c4cfe 100644 --- a/src/cryptography/hazmat/primitives/kdf/hkdf.py +++ b/src/cryptography/hazmat/primitives/kdf/hkdf.py @@ -67,7 +67,7 @@ def __init__(self, algorithm, length, info, backend): self._backend = backend - max_length = 255 * (algorithm.digest_size // 8) + max_length = 255 * algorithm.digest_size if length > max_length: raise ValueError( diff --git a/tests/hazmat/primitives/test_hkdf.py b/tests/hazmat/primitives/test_hkdf.py index a05fd752ff55..5d2d18676bd9 100644 --- a/tests/hazmat/primitives/test_hkdf.py +++ b/tests/hazmat/primitives/test_hkdf.py @@ -5,6 +5,7 @@ from __future__ import absolute_import, division, print_function import binascii +import os import pytest @@ -15,13 +16,15 @@ from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand -from ...utils import raises_unsupported_algorithm +from ...utils import ( + load_nist_vectors, load_vectors_from_file, raises_unsupported_algorithm +) @pytest.mark.requires_backend_interface(interface=HMACBackend) class TestHKDF(object): def test_length_limit(self, backend): - big_length = 255 * (hashes.SHA256().digest_size // 8) + 1 + big_length = 255 * hashes.SHA256().digest_size + 1 with pytest.raises(ValueError): HKDF( @@ -153,6 +156,21 @@ def test_derive_short_output(self, backend): assert hkdf.derive(b"\x01" * 16) == b"gJ\xfb{" + def test_derive_long_output(self, backend): + vector = load_vectors_from_file( + os.path.join("KDF", "hkdf-generated.txt"), load_nist_vectors + )[0] + hkdf = HKDF( + hashes.SHA256(), + int(vector["l"]), + salt=vector["salt"], + info=vector["info"], + backend=backend + ) + ikm = binascii.unhexlify(vector["ikm"]) + + assert hkdf.derive(ikm) == binascii.unhexlify(vector["okm"]) + @pytest.mark.requires_backend_interface(interface=HMACBackend) class TestHKDFExpand(object):
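With the corrected cap of `255 * digest_size` octets, a SHA-256 HKDF can expand to as much as 8160 bytes. The sketch below derives 4096 bytes, which the old `255 * (32 // 8) = 1020`-byte limit would have rejected; the IKM, salt and info values are placeholders:

```python
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

okm = HKDF(
    algorithm=hashes.SHA256(),
    length=4096,            # allowed: 4096 <= 255 * 32 = 8160 octets
    salt=None,              # RFC 5869 default: HashLen zero bytes
    info=b"example expansion",
    backend=default_backend(),
).derive(b"input key material")

assert len(okm) == 4096
```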
internetarchive__openlibrary-8164
IA imports are ignoring *most* high quality MARC data

Even when there's a professionally cataloged MARC record associated with an Internet Archive import, none of the information in it is actually being imported. I've added some of the information available, which you can see in the diff below (not guaranteed to be exhaustive). This means that the incredibly poor quality metadata from Better World Books (BWB), i.e. a corrupted title, and from Amazon (a bad publisher) doesn't get corrected. It also means that really useful finding information for users, like the language and the classification of the edition, isn't available.

Very strangely, it seems like my viewing the edition actually somehow triggered the Amazon import, which then overwrote the link to the IA MARC record with a link to the Amazon record in the footer of the edition page (fortunately I had the MARC page open in another tab, so I didn't lose it).

### Evidence / Screenshot (if possible)

![Screen Shot 2023-02-10 at 3 46 07 PM](https://user-images.githubusercontent.com/82178/218194378-37a73d59-cb0c-4bf3-b110-ef5bc925d52a.png)

### Relevant url?

https://openlibrary.org/show-records/ia:lesnoirsetlesrou0000garl
https://openlibrary.org/books/OL43522513M/Les_noirs_et_les_rouges?_compare=Comparer&b=6&a=3&m=diff

### Steps to Reproduce

* Expected: All useful information from the MARC record is imported to OpenLibrary and available for use in search.

### Details

- **Environment (prod/dev/local)?** prod
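For reference, the import record format documented in `openlibrary/catalog/add_book/__init__.py` (below) already has places for the fields this issue says are being dropped. A hypothetical record (every value is an invented placeholder) carrying language, classification and subject data might look like:

```python
# Hypothetical import record -- the field names follow the conventions in
# openlibrary/catalog/add_book (see edition_list_fields and subject_fields);
# all values here are invented placeholders.
record = {
    "title": "Les noirs et les rouges",
    "source_records": ["ia:lesnoirsetlesrou0000garl"],
    "authors": [{"name": "Placeholder Author"}],
    "publishers": ["Placeholder Publisher"],
    "languages": ["fre"],                      # MARC language code of the edition
    "lc_classifications": ["Placeholder LC class"],
    "subjects": ["Placeholder subject"],
}
```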
[ { "content": "\"\"\"Module to load books into Open Library.\n\nThis is used to load books from various MARC sources, including\nInternet Archive.\n\nFor loading a book, the available metadata is compiled as a dict,\ncalled a record internally. Here is a sample record:\n\n {\n \"title\": \"The Adventures of Tom Sawyer\",\n \"source_records\": [\"ia:TheAdventuresOfTomSawyer_201303\"],\n \"authors\": [{\n \"name\": \"Mark Twain\"\n }]\n }\n\nThe title and source_records fields are mandatory.\n\nA record is loaded by calling the load function.\n\n record = {...}\n response = load(record)\n\n\"\"\"\nimport re\nfrom typing import TYPE_CHECKING, Any\n\nimport web\n\nfrom collections import defaultdict\nfrom copy import copy\nfrom time import sleep\n\nimport requests\n\nfrom infogami import config\n\nfrom openlibrary import accounts\nfrom openlibrary.catalog.utils import (\n EARLIEST_PUBLISH_YEAR_FOR_BOOKSELLERS,\n get_publication_year,\n is_independently_published,\n is_promise_item,\n mk_norm,\n needs_isbn_and_lacks_one,\n publication_too_old_and_not_exempt,\n published_in_future_year,\n)\nfrom openlibrary.core import lending\nfrom openlibrary.plugins.upstream.utils import strip_accents\nfrom openlibrary.catalog.utils import expand_record\nfrom openlibrary.utils import uniq, dicthash\nfrom openlibrary.utils.isbn import normalize_isbn\nfrom openlibrary.utils.lccn import normalize_lccn\n\nfrom openlibrary.catalog.add_book.load_book import (\n build_query,\n east_in_by_statement,\n import_author,\n InvalidLanguage,\n)\nfrom openlibrary.catalog.add_book.match import editions_match\n\nif TYPE_CHECKING:\n from openlibrary.plugins.upstream.models import Edition\n\nre_normalize = re.compile('[^[:alphanum:] ]', re.U)\nre_lang = re.compile('^/languages/([a-z]{3})$')\nISBD_UNIT_PUNCT = ' : ' # ISBD cataloging title-unit separator punctuation\n\n\ntype_map = {\n 'description': 'text',\n 'notes': 'text',\n 'number_of_pages': 'int',\n}\n\n\nclass CoverNotSaved(Exception):\n def __init__(self, f):\n self.f = f\n\n def __str__(self):\n return \"coverstore responded with: '%s'\" % self.f\n\n\nclass RequiredField(Exception):\n def __init__(self, f):\n self.f = f\n\n def __str__(self):\n return \"missing required field(s): %s\" % \", \".join(self.f)\n\n\nclass PublicationYearTooOld(Exception):\n def __init__(self, year):\n self.year = year\n\n def __str__(self):\n return f\"publication year is too old (i.e. earlier than {EARLIEST_PUBLISH_YEAR_FOR_BOOKSELLERS}): {self.year}\"\n\n\nclass PublishedInFutureYear(Exception):\n def __init__(self, year):\n self.year = year\n\n def __str__(self):\n return f\"published in future year: {self.year}\"\n\n\nclass IndependentlyPublished(Exception):\n def __init__(self):\n pass\n\n def __str__(self):\n return \"book is independently published\"\n\n\nclass SourceNeedsISBN(Exception):\n def __init__(self):\n pass\n\n def __str__(self):\n return \"this source needs an ISBN\"\n\n\n# don't use any of these as work titles\nbad_titles = {\n 'Publications',\n 'Works. English',\n 'Missal',\n 'Works',\n 'Report',\n 'Letters',\n 'Calendar',\n 'Bulletin',\n 'Plays',\n 'Sermons',\n 'Correspondence',\n 'Bill',\n 'Bills',\n 'Selections',\n 'Selected works',\n 'Selected works. 
English',\n 'The Novels',\n 'Laws, etc',\n}\n\nsubject_fields = ['subjects', 'subject_places', 'subject_times', 'subject_people']\n\n\ndef normalize(s):\n \"\"\"Strip non-alphanums and truncate at 25 chars.\"\"\"\n norm = strip_accents(s).lower()\n norm = norm.replace(' and ', ' ')\n if norm.startswith('the '):\n norm = norm[4:]\n elif norm.startswith('a '):\n norm = norm[2:]\n # strip bracketed text\n norm = re.sub(r' ?\\(.*\\)', '', norm)\n return norm.replace(' ', '')[:25]\n\n\ndef is_redirect(thing):\n \"\"\"\n :param Thing thing:\n :rtype: bool\n \"\"\"\n if not thing:\n return False\n return thing.type.key == '/type/redirect'\n\n\ndef get_title(e):\n if not e.get('work_titles'):\n return e['title']\n wt = e['work_titles'][0]\n return e['title'] if wt in bad_titles else e['title']\n\n\ndef split_subtitle(full_title):\n \"\"\"\n Splits a title into (title, subtitle),\n strips parenthetical tags. Used for bookseller\n catalogs which do not pre-separate subtitles.\n\n :param str full_title:\n :rtype: (str, str | None)\n :return: (title, subtitle | None)\n \"\"\"\n\n # strip parenthetical blocks wherever they occur\n # can handle 1 level of nesting\n re_parens_strip = re.compile(r'\\(([^\\)\\(]*|[^\\(]*\\([^\\)]*\\)[^\\)]*)\\)')\n clean_title = re.sub(re_parens_strip, '', full_title)\n\n titles = clean_title.split(':')\n subtitle = titles.pop().strip() if len(titles) > 1 else None\n title = ISBD_UNIT_PUNCT.join([unit.strip() for unit in titles])\n return (title, subtitle)\n\n\ndef find_matching_work(e):\n \"\"\"\n Looks for an existing Work representing the new import edition by\n comparing normalized titles for every work by each author of the current edition.\n Returns the first match found, or None.\n\n :param dict e: An OL edition suitable for saving, has a key, and has full Authors with keys\n but has not yet been saved.\n :rtype: None or str\n :return: the matched work key \"/works/OL..W\" if found\n \"\"\"\n\n norm_title = mk_norm(get_title(e))\n seen = set()\n for a in e['authors']:\n q = {'type': '/type/work', 'authors': {'author': {'key': a['key']}}}\n work_keys = list(web.ctx.site.things(q))\n for wkey in work_keys:\n w = web.ctx.site.get(wkey)\n if wkey in seen:\n continue\n seen.add(wkey)\n if not w.get('title'):\n continue\n if mk_norm(w['title']) == norm_title:\n assert w.type.key == '/type/work'\n return wkey\n\n\ndef build_author_reply(authors_in, edits, source):\n \"\"\"\n Steps through an import record's authors, and creates new records if new,\n adding them to 'edits' to be saved later.\n\n :param list authors_in: import author dicts [{\"name:\" \"Bob\"}, ...], maybe dates\n :param list edits: list of Things to be saved later. Is modified by this method.\n :param str source: Source record e.g. 
marc:marc_ex/part01.dat:26456929:680\n :rtype: tuple\n :return: (list, list) authors [{\"key\": \"/author/OL..A\"}, ...], author_reply\n \"\"\"\n\n authors = []\n author_reply = []\n for a in authors_in:\n new_author = 'key' not in a\n if new_author:\n a['key'] = web.ctx.site.new_key('/type/author')\n a['source_records'] = [source]\n edits.append(a)\n authors.append({'key': a['key']})\n author_reply.append(\n {\n 'key': a['key'],\n 'name': a['name'],\n 'status': ('created' if new_author else 'matched'),\n }\n )\n return (authors, author_reply)\n\n\ndef new_work(edition, rec, cover_id=None):\n \"\"\"\n :param dict edition: New OL Edition\n :param dict rec: Edition import data\n :param (int|None) cover_id: cover id\n :rtype: dict\n :return: a work to save\n \"\"\"\n w = {\n 'type': {'key': '/type/work'},\n 'title': get_title(rec),\n }\n for s in subject_fields:\n if s in rec:\n w[s] = rec[s]\n\n if 'authors' in edition:\n w['authors'] = [\n {'type': {'key': '/type/author_role'}, 'author': akey}\n for akey in edition['authors']\n ]\n\n if 'description' in rec:\n w['description'] = {'type': '/type/text', 'value': rec['description']}\n\n wkey = web.ctx.site.new_key('/type/work')\n if edition.get('covers'):\n w['covers'] = edition['covers']\n w['key'] = wkey\n return w\n\n\ndef add_cover(cover_url, ekey, account_key=None):\n \"\"\"\n Adds a cover to coverstore and returns the cover id.\n\n :param str cover_url: URL of cover image\n :param str ekey: Edition key /book/OL..M\n :rtype: int or None\n :return: Cover id, or None if upload did not succeed\n \"\"\"\n olid = ekey.split('/')[-1]\n coverstore_url = config.get('coverstore_url').rstrip('/')\n upload_url = coverstore_url + '/b/upload2'\n if upload_url.startswith('//'):\n upload_url = '{}:{}'.format(web.ctx.get('protocol', 'http'), upload_url)\n if not account_key:\n user = accounts.get_current_user()\n if not user:\n raise RuntimeError(\"accounts.get_current_user() failed\")\n account_key = user.get('key') or user.get('_key')\n params = {\n 'author': account_key,\n 'data': None,\n 'source_url': cover_url,\n 'olid': olid,\n 'ip': web.ctx.ip,\n }\n reply = None\n for attempt in range(10):\n try:\n payload = requests.compat.urlencode(params).encode('utf-8')\n response = requests.post(upload_url, data=payload)\n except requests.HTTPError:\n sleep(2)\n continue\n body = response.text\n if response.status_code == 500:\n raise CoverNotSaved(body)\n if body not in ['', 'None']:\n reply = response.json()\n if response.status_code == 200 and 'id' in reply:\n break\n sleep(2)\n if not reply or reply.get('message') == 'Invalid URL':\n return\n cover_id = int(reply['id'])\n return cover_id\n\n\ndef get_ia_item(ocaid):\n import internetarchive as ia\n\n cfg = {'general': {'secure': False}}\n item = ia.get_item(ocaid, config=cfg)\n return item\n\n\ndef modify_ia_item(item, data):\n access_key = (\n lending.config_ia_ol_metadata_write_s3\n and lending.config_ia_ol_metadata_write_s3['s3_key']\n )\n secret_key = (\n lending.config_ia_ol_metadata_write_s3\n and lending.config_ia_ol_metadata_write_s3['s3_secret']\n )\n return item.modify_metadata(data, access_key=access_key, secret_key=secret_key)\n\n\ndef create_ol_subjects_for_ocaid(ocaid, subjects):\n item = get_ia_item(ocaid)\n openlibrary_subjects = copy(item.metadata.get('openlibrary_subject')) or []\n\n if not isinstance(openlibrary_subjects, list):\n openlibrary_subjects = [openlibrary_subjects]\n\n for subject in subjects:\n if subject not in openlibrary_subjects:\n 
openlibrary_subjects.append(subject)\n\n r = modify_ia_item(item, {'openlibrary_subject': openlibrary_subjects})\n if r.status_code != 200:\n return f'{item.identifier} failed: {r.content}'\n else:\n return \"success for %s\" % item.identifier\n\n\ndef update_ia_metadata_for_ol_edition(edition_id):\n \"\"\"\n Writes the Open Library Edition and Work id to a linked\n archive.org item.\n\n :param str edition_id: of the form OL..M\n :rtype: dict\n :return: error report, or modified archive.org metadata on success\n \"\"\"\n\n data = {'error': 'No qualifying edition'}\n if edition_id:\n ed = web.ctx.site.get('/books/%s' % edition_id)\n if ed.ocaid:\n work = ed.works[0] if ed.get('works') else None\n if work and work.key:\n item = get_ia_item(ed.ocaid)\n work_id = work.key.split('/')[2]\n r = modify_ia_item(\n item,\n {'openlibrary_work': work_id, 'openlibrary_edition': edition_id},\n )\n if r.status_code != 200:\n data = {'error': f'{item.identifier} failed: {r.content}'}\n else:\n data = item.metadata\n return data\n\n\ndef normalize_record_bibids(rec):\n \"\"\"\n Returns the Edition import record with all ISBN fields and LCCNs cleaned.\n\n :param dict rec: Edition import record\n :rtype: dict\n :return: A record with cleaned LCCNs, and ISBNs in the various possible ISBN locations.\n \"\"\"\n for field in ('isbn_13', 'isbn_10', 'isbn'):\n if rec.get(field):\n rec[field] = [\n normalize_isbn(isbn) for isbn in rec.get(field) if normalize_isbn(isbn)\n ]\n if rec.get('lccn'):\n rec['lccn'] = [\n normalize_lccn(lccn) for lccn in rec.get('lccn') if normalize_lccn(lccn)\n ]\n return rec\n\n\ndef isbns_from_record(rec):\n \"\"\"\n Returns a list of all isbns from the various possible isbn fields.\n\n :param dict rec: Edition import record\n :rtype: list\n \"\"\"\n isbns = rec.get('isbn', []) + rec.get('isbn_10', []) + rec.get('isbn_13', [])\n return isbns\n\n\ndef build_pool(rec):\n \"\"\"\n Searches for existing edition matches on title and bibliographic keys.\n\n :param dict rec: Edition record\n :rtype: dict\n :return: {<identifier: title | isbn | lccn etc>: [list of /books/OL..M keys that match rec on <identifier>]}\n \"\"\"\n pool = defaultdict(set)\n match_fields = ('title', 'oclc_numbers', 'lccn', 'ocaid')\n\n # Find records with matching fields\n for field in match_fields:\n pool[field] = set(editions_matched(rec, field))\n\n # update title pool with normalized title matches\n pool['title'].update(\n set(editions_matched(rec, 'normalized_title_', normalize(rec['title'])))\n )\n\n # Find records with matching ISBNs\n if isbns := isbns_from_record(rec):\n pool['isbn'] = set(editions_matched(rec, 'isbn_', isbns))\n\n return {k: list(v) for k, v in pool.items() if v}\n\n\ndef find_quick_match(rec):\n \"\"\"\n Attempts to quickly find an existing item match using bibliographic keys.\n\n :param dict rec: Edition record\n :rtype: str|bool\n :return: First key matched of format \"/books/OL..M\" or False if no match found.\n \"\"\"\n\n if 'openlibrary' in rec:\n return '/books/' + rec['openlibrary']\n\n ekeys = editions_matched(rec, 'ocaid')\n if ekeys:\n return ekeys[0]\n\n if isbns := isbns_from_record(rec):\n ekeys = editions_matched(rec, 'isbn_', isbns)\n if ekeys:\n return ekeys[0]\n\n # only searches for the first value from these lists\n for f in 'source_records', 'oclc_numbers', 'lccn':\n if rec.get(f):\n if f == 'source_records' and not rec[f][0].startswith('ia:'):\n continue\n ekeys = editions_matched(rec, f, rec[f][0])\n if ekeys:\n return ekeys[0]\n return False\n\n\ndef 
editions_matched(rec, key, value=None):\n \"\"\"\n Search OL for editions matching record's 'key' value.\n\n :param dict rec: Edition import record\n :param str key: Key to search on, e.g. 'isbn_'\n :param list|str value: Value or Values to use, overriding record values\n :rtpye: list\n :return: List of edition keys [\"/books/OL..M\",]\n \"\"\"\n if value is None and key not in rec:\n return []\n\n if value is None:\n value = rec[key]\n q = {'type': '/type/edition', key: value}\n ekeys = list(web.ctx.site.things(q))\n return ekeys\n\n\ndef find_exact_match(rec, edition_pool):\n \"\"\"\n Returns an edition key match for rec from edition_pool\n Only returns a key if all values match?\n\n :param dict rec: Edition import record\n :param dict edition_pool:\n :rtype: str|bool\n :return: edition key\n \"\"\"\n seen = set()\n for editions in edition_pool.values():\n for ekey in editions:\n if ekey in seen:\n continue\n seen.add(ekey)\n existing = web.ctx.site.get(ekey)\n match = True\n for k, v in rec.items():\n if k == 'source_records':\n continue\n existing_value = existing.get(k)\n if not existing_value:\n continue\n if k == 'languages':\n existing_value = [\n str(re_lang.match(lang.key).group(1)) for lang in existing_value\n ]\n if k == 'authors':\n existing_value = [dict(a) for a in existing_value]\n for a in existing_value:\n del a['type']\n del a['key']\n for a in v:\n if 'entity_type' in a:\n del a['entity_type']\n if 'db_name' in a:\n del a['db_name']\n\n if existing_value != v:\n match = False\n break\n if match:\n return ekey\n return False\n\n\ndef find_enriched_match(rec, edition_pool):\n \"\"\"\n Find the best match for rec in edition_pool and return its key.\n :param dict rec: the new edition we are trying to match.\n :param list edition_pool: list of possible edition key matches, output of build_pool(import record)\n :rtype: str|None\n :return: None or the edition key '/books/OL...M' of the best edition match for enriched_rec in edition_pool\n \"\"\"\n enriched_rec = expand_record(rec)\n add_db_name(enriched_rec)\n\n seen = set()\n for edition_keys in edition_pool.values():\n for edition_key in edition_keys:\n if edition_key in seen:\n continue\n thing = None\n found = True\n while not thing or is_redirect(thing):\n seen.add(edition_key)\n thing = web.ctx.site.get(edition_key)\n if thing is None:\n found = False\n break\n if is_redirect(thing):\n edition_key = thing['location']\n # FIXME: this updates edition_key, but leaves thing as redirect,\n # which will raise an exception in editions_match()\n if not found:\n continue\n if editions_match(enriched_rec, thing):\n return edition_key\n\n\ndef add_db_name(rec: dict) -> None:\n \"\"\"\n db_name = Author name followed by dates.\n adds 'db_name' in place for each author.\n \"\"\"\n if 'authors' not in rec:\n return\n\n for a in rec['authors'] or []:\n date = None\n if 'date' in a:\n assert 'birth_date' not in a\n assert 'death_date' not in a\n date = a['date']\n elif 'birth_date' in a or 'death_date' in a:\n date = a.get('birth_date', '') + '-' + a.get('death_date', '')\n a['db_name'] = ' '.join([a['name'], date]) if date else a['name']\n\n\ndef load_data(rec, account_key=None):\n \"\"\"\n Adds a new Edition to Open Library. 
Checks for existing Works.\n Creates a new Work, and Author, if required,\n otherwise associates the new Edition with the existing Work.\n\n :param dict rec: Edition record to add (no further checks at this point)\n :rtype: dict\n :return:\n {\n \"success\": False,\n \"error\": <error msg>\n }\n OR\n {\n \"success\": True,\n \"work\": {\"key\": <key>, \"status\": \"created\" | \"modified\" | \"matched\"},\n \"edition\": {\"key\": <key>, \"status\": \"created\"},\n \"authors\": [{\"status\": \"matched\", \"name\": \"John Smith\", \"key\": <key>}, ...]\n }\n \"\"\"\n\n cover_url = None\n if 'cover' in rec:\n cover_url = rec['cover']\n del rec['cover']\n try:\n # get an OL style edition dict\n edition = build_query(rec)\n except InvalidLanguage as e:\n return {\n 'success': False,\n 'error': str(e),\n }\n\n ekey = web.ctx.site.new_key('/type/edition')\n cover_id = None\n if cover_url:\n cover_id = add_cover(cover_url, ekey, account_key=account_key)\n if cover_id:\n edition['covers'] = [cover_id]\n\n edits = [] # Things (Edition, Work, Authors) to be saved\n reply = {}\n # TOFIX: edition.authors has already been processed by import_authors() in build_query(), following line is a NOP?\n author_in = [\n import_author(a, eastern=east_in_by_statement(rec, a))\n for a in edition.get('authors', [])\n ]\n # build_author_reply() adds authors to edits\n (authors, author_reply) = build_author_reply(\n author_in, edits, rec['source_records'][0]\n )\n\n if authors:\n edition['authors'] = authors\n reply['authors'] = author_reply\n\n wkey = None\n work_state = 'created'\n # Look for an existing work\n if 'authors' in edition:\n wkey = find_matching_work(edition)\n if wkey:\n w = web.ctx.site.get(wkey)\n work_state = 'matched'\n found_wkey_match = True\n need_update = False\n for k in subject_fields:\n if k not in rec:\n continue\n for s in rec[k]:\n if normalize(s) not in [\n normalize(existing) for existing in w.get(k, [])\n ]:\n w.setdefault(k, []).append(s)\n need_update = True\n if cover_id:\n w.setdefault('covers', []).append(cover_id)\n need_update = True\n if need_update:\n work_state = 'modified'\n edits.append(w.dict())\n else:\n # Create new work\n w = new_work(edition, rec, cover_id)\n wkey = w['key']\n edits.append(w)\n\n assert wkey\n edition['works'] = [{'key': wkey}]\n edition['key'] = ekey\n edits.append(edition)\n\n web.ctx.site.save_many(edits, comment='import new book', action='add-book')\n\n # Writes back `openlibrary_edition` and `openlibrary_work` to\n # archive.org item after successful import:\n if 'ocaid' in rec:\n update_ia_metadata_for_ol_edition(ekey.split('/')[-1])\n\n reply['success'] = True\n reply['edition'] = {'key': ekey, 'status': 'created'}\n reply['work'] = {'key': wkey, 'status': work_state}\n return reply\n\n\ndef normalize_import_record(rec: dict) -> None:\n \"\"\"\n Normalize the import record by:\n - Verifying required fields\n - Ensuring source_records is a list\n - Splitting subtitles out of the title field\n - Cleaning all ISBN and LCCN fields ('bibids'), and\n - Deduplicate authors.\n\n NOTE: This function modifies the passed-in rec in place.\n \"\"\"\n required_fields = [\n 'title',\n 'source_records',\n ] # ['authors', 'publishers', 'publish_date']\n for field in required_fields:\n if not rec.get(field):\n raise RequiredField(field)\n\n # Ensure source_records is a list.\n if not isinstance(rec['source_records'], list):\n rec['source_records'] = [rec['source_records']]\n\n # Split subtitle if required and not already present\n if ':' in rec.get('title', '') 
and not rec.get('subtitle'):\n title, subtitle = split_subtitle(rec.get('title'))\n if subtitle:\n rec['title'] = title\n rec['subtitle'] = subtitle\n\n rec = normalize_record_bibids(rec)\n\n # deduplicate authors\n rec['authors'] = uniq(rec.get('authors', []), dicthash)\n\n\ndef validate_record(rec: dict) -> None:\n \"\"\"\n Check for:\n - publication years too old from non-exempt sources (e.g. Amazon);\n - publish dates in a future year;\n - independently published books; and\n - books that need an ISBN and lack one.\n\n Each check raises an error or returns None.\n\n If all the validations pass, implicitly return None.\n \"\"\"\n # Only validate publication year if a year is found.\n if publication_year := get_publication_year(rec.get('publish_date')):\n if publication_too_old_and_not_exempt(rec):\n raise PublicationYearTooOld(publication_year)\n elif published_in_future_year(publication_year):\n raise PublishedInFutureYear(publication_year)\n\n if is_independently_published(rec.get('publishers', [])):\n raise IndependentlyPublished\n\n if needs_isbn_and_lacks_one(rec):\n raise SourceNeedsISBN\n\n\ndef find_match(rec, edition_pool) -> str | None:\n \"\"\"Use rec to try to find an existing edition key that matches.\"\"\"\n match = find_quick_match(rec)\n if not match:\n match = find_exact_match(rec, edition_pool)\n\n if not match:\n # Add 'full_title' to the rec by conjoining 'title' and 'subtitle'.\n # expand_record() uses this for matching.\n rec['full_title'] = rec['title']\n if subtitle := rec.get('subtitle'):\n rec['full_title'] += ' ' + subtitle\n\n match = find_enriched_match(rec, edition_pool)\n\n return match\n\n\ndef update_edition_with_rec_data(\n rec: dict, account_key: str | None, edition: \"Edition\"\n) -> bool:\n \"\"\"\n Enrich the Edition by adding certain fields present in rec but absent\n in edition.\n\n NOTE: This modifies the passed-in Edition in place.\n \"\"\"\n need_edition_save = False\n # Add cover to edition\n if 'cover' in rec and not edition.get_covers():\n cover_url = rec['cover']\n cover_id = add_cover(cover_url, edition.key, account_key=account_key)\n if cover_id:\n edition['covers'] = [cover_id]\n need_edition_save = True\n\n # Add ocaid to edition (str), if needed\n if 'ocaid' in rec and not edition.ocaid:\n edition['ocaid'] = rec['ocaid']\n need_edition_save = True\n\n # Add list fields to edition as needed\n edition_list_fields = [\n 'local_id',\n 'lccn',\n 'lc_classifications',\n 'oclc_numbers',\n 'source_records',\n ]\n for f in edition_list_fields:\n if f not in rec or not rec[f]:\n continue\n # ensure values is a list\n values = rec[f] if isinstance(rec[f], list) else [rec[f]]\n if f in edition:\n # get values from rec that are not currently on the edition\n to_add = [v for v in values if v not in edition[f]]\n edition[f] += to_add\n else:\n edition[f] = to_add = values\n if to_add:\n need_edition_save = True\n\n other_edition_fields = [\n 'number_of_pages',\n 'publishers',\n 'publish_date',\n ]\n for f in other_edition_fields:\n if f not in rec or not rec[f]:\n continue\n if f not in edition:\n edition[f] = rec[f]\n need_edition_save = True\n\n # Add new identifiers\n if 'identifiers' in rec:\n identifiers = defaultdict(list, edition.dict().get('identifiers', {}))\n for k, vals in rec['identifiers'].items():\n identifiers[k].extend(vals)\n identifiers[k] = list(set(identifiers[k]))\n if edition.dict().get('identifiers') != identifiers:\n edition['identifiers'] = identifiers\n need_edition_save = True\n\n return need_edition_save\n\n\ndef 
update_work_with_rec_data(\n rec: dict, edition: \"Edition\", work: dict[str, Any], need_work_save: bool\n) -> bool:\n \"\"\"\n Enrich the Work by adding certain fields present in rec but absent\n in work.\n\n NOTE: This modifies the passed-in Work in place.\n \"\"\"\n # Add subjects to work, if not already present\n if 'subjects' in rec:\n work_subjects = list(work.get('subjects', []))\n for s in rec['subjects']:\n if s not in work_subjects:\n work_subjects.append(s)\n need_work_save = True\n if need_work_save and work_subjects:\n work['subjects'] = work_subjects\n\n # Add cover to work, if needed\n if not work.get('covers') and edition.get_covers():\n work['covers'] = [edition['covers'][0]]\n need_work_save = True\n\n # Add description to work, if needed\n if not work.get('description') and edition.get('description'):\n work['description'] = edition['description']\n need_work_save = True\n\n # Add authors to work, if needed\n if not work.get('authors'):\n authors = [import_author(a) for a in rec.get('authors', [])]\n work['authors'] = [\n {'type': {'key': '/type/author_role'}, 'author': a.key}\n for a in authors\n if a.get('key')\n ]\n if work.get('authors'):\n need_work_save = True\n\n return need_work_save\n\n\ndef load(rec, account_key=None):\n \"\"\"Given a record, tries to add/match that edition in the system.\n\n Record is a dictionary containing all the metadata of the edition.\n The following fields are mandatory:\n\n * title: str\n * source_records: list\n\n :param dict rec: Edition record to add\n :rtype: dict\n :return: a dict to be converted into a JSON HTTP response, same as load_data()\n \"\"\"\n if not is_promise_item(rec):\n validate_record(rec)\n normalize_import_record(rec)\n\n # Resolve an edition if possible, or create and return one if not.\n\n edition_pool = build_pool(rec)\n if not edition_pool:\n # No match candidates found, add edition\n return load_data(rec, account_key=account_key)\n\n match = find_match(rec, edition_pool)\n if not match:\n # No match found, add edition\n return load_data(rec, account_key=account_key)\n\n # We have an edition match at this point\n need_work_save = need_edition_save = False\n work: dict[str, Any]\n edition: Edition = web.ctx.site.get(match)\n # check for, and resolve, author redirects\n for a in edition.authors:\n while is_redirect(a):\n if a in edition.authors:\n edition.authors.remove(a)\n a = web.ctx.site.get(a.location)\n if not is_redirect(a):\n edition.authors.append(a)\n\n if edition.get('works'):\n work = edition.works[0].dict()\n work_created = False\n else:\n # Found an edition without a work\n work_created = need_work_save = need_edition_save = True\n work = new_work(edition.dict(), rec)\n edition.works = [{'key': work['key']}]\n\n need_edition_save = update_edition_with_rec_data(\n rec=rec, account_key=account_key, edition=edition\n )\n need_work_save = update_work_with_rec_data(\n rec=rec, edition=edition, work=work, need_work_save=need_work_save\n )\n\n edits = []\n reply = {\n 'success': True,\n 'edition': {'key': match, 'status': 'matched'},\n 'work': {'key': work['key'], 'status': 'matched'},\n }\n if need_edition_save:\n reply['edition']['status'] = 'modified'\n edits.append(edition.dict())\n if need_work_save:\n reply['work']['status'] = 'created' if work_created else 'modified'\n edits.append(work)\n if edits:\n web.ctx.site.save_many(\n edits, comment='import existing book', action='edit-book'\n )\n if 'ocaid' in rec:\n update_ia_metadata_for_ol_edition(match.split('/')[-1])\n return reply\n", "path": 
"openlibrary/catalog/add_book/__init__.py" } ]
[ { "content": "\"\"\"Module to load books into Open Library.\n\nThis is used to load books from various MARC sources, including\nInternet Archive.\n\nFor loading a book, the available metadata is compiled as a dict,\ncalled a record internally. Here is a sample record:\n\n {\n \"title\": \"The Adventures of Tom Sawyer\",\n \"source_records\": [\"ia:TheAdventuresOfTomSawyer_201303\"],\n \"authors\": [{\n \"name\": \"Mark Twain\"\n }]\n }\n\nThe title and source_records fields are mandatory.\n\nA record is loaded by calling the load function.\n\n record = {...}\n response = load(record)\n\n\"\"\"\nimport re\nfrom typing import TYPE_CHECKING, Any\n\nimport web\n\nfrom collections import defaultdict\nfrom copy import copy\nfrom time import sleep\n\nimport requests\n\nfrom infogami import config\n\nfrom openlibrary import accounts\nfrom openlibrary.catalog.utils import (\n EARLIEST_PUBLISH_YEAR_FOR_BOOKSELLERS,\n get_publication_year,\n is_independently_published,\n is_promise_item,\n mk_norm,\n needs_isbn_and_lacks_one,\n publication_too_old_and_not_exempt,\n published_in_future_year,\n)\nfrom openlibrary.core import lending\nfrom openlibrary.plugins.upstream.utils import strip_accents\nfrom openlibrary.catalog.utils import expand_record\nfrom openlibrary.utils import uniq, dicthash\nfrom openlibrary.utils.isbn import normalize_isbn\nfrom openlibrary.utils.lccn import normalize_lccn\n\nfrom openlibrary.catalog.add_book.load_book import (\n build_query,\n east_in_by_statement,\n import_author,\n InvalidLanguage,\n)\nfrom openlibrary.catalog.add_book.match import editions_match\n\nif TYPE_CHECKING:\n from openlibrary.plugins.upstream.models import Edition\n\nre_normalize = re.compile('[^[:alphanum:] ]', re.U)\nre_lang = re.compile('^/languages/([a-z]{3})$')\nISBD_UNIT_PUNCT = ' : ' # ISBD cataloging title-unit separator punctuation\n\n\ntype_map = {\n 'description': 'text',\n 'notes': 'text',\n 'number_of_pages': 'int',\n}\n\n\nclass CoverNotSaved(Exception):\n def __init__(self, f):\n self.f = f\n\n def __str__(self):\n return \"coverstore responded with: '%s'\" % self.f\n\n\nclass RequiredField(Exception):\n def __init__(self, f):\n self.f = f\n\n def __str__(self):\n return \"missing required field(s): %s\" % \", \".join(self.f)\n\n\nclass PublicationYearTooOld(Exception):\n def __init__(self, year):\n self.year = year\n\n def __str__(self):\n return f\"publication year is too old (i.e. earlier than {EARLIEST_PUBLISH_YEAR_FOR_BOOKSELLERS}): {self.year}\"\n\n\nclass PublishedInFutureYear(Exception):\n def __init__(self, year):\n self.year = year\n\n def __str__(self):\n return f\"published in future year: {self.year}\"\n\n\nclass IndependentlyPublished(Exception):\n def __init__(self):\n pass\n\n def __str__(self):\n return \"book is independently published\"\n\n\nclass SourceNeedsISBN(Exception):\n def __init__(self):\n pass\n\n def __str__(self):\n return \"this source needs an ISBN\"\n\n\n# don't use any of these as work titles\nbad_titles = {\n 'Publications',\n 'Works. English',\n 'Missal',\n 'Works',\n 'Report',\n 'Letters',\n 'Calendar',\n 'Bulletin',\n 'Plays',\n 'Sermons',\n 'Correspondence',\n 'Bill',\n 'Bills',\n 'Selections',\n 'Selected works',\n 'Selected works. 
English',\n 'The Novels',\n 'Laws, etc',\n}\n\nsubject_fields = ['subjects', 'subject_places', 'subject_times', 'subject_people']\n\n\ndef normalize(s):\n \"\"\"Strip non-alphanums and truncate at 25 chars.\"\"\"\n norm = strip_accents(s).lower()\n norm = norm.replace(' and ', ' ')\n if norm.startswith('the '):\n norm = norm[4:]\n elif norm.startswith('a '):\n norm = norm[2:]\n # strip bracketed text\n norm = re.sub(r' ?\\(.*\\)', '', norm)\n return norm.replace(' ', '')[:25]\n\n\ndef is_redirect(thing):\n \"\"\"\n :param Thing thing:\n :rtype: bool\n \"\"\"\n if not thing:\n return False\n return thing.type.key == '/type/redirect'\n\n\ndef get_title(e):\n if not e.get('work_titles'):\n return e['title']\n wt = e['work_titles'][0]\n return e['title'] if wt in bad_titles else e['title']\n\n\ndef split_subtitle(full_title):\n \"\"\"\n Splits a title into (title, subtitle),\n strips parenthetical tags. Used for bookseller\n catalogs which do not pre-separate subtitles.\n\n :param str full_title:\n :rtype: (str, str | None)\n :return: (title, subtitle | None)\n \"\"\"\n\n # strip parenthetical blocks wherever they occur\n # can handle 1 level of nesting\n re_parens_strip = re.compile(r'\\(([^\\)\\(]*|[^\\(]*\\([^\\)]*\\)[^\\)]*)\\)')\n clean_title = re.sub(re_parens_strip, '', full_title)\n\n titles = clean_title.split(':')\n subtitle = titles.pop().strip() if len(titles) > 1 else None\n title = ISBD_UNIT_PUNCT.join([unit.strip() for unit in titles])\n return (title, subtitle)\n\n\ndef find_matching_work(e):\n \"\"\"\n Looks for an existing Work representing the new import edition by\n comparing normalized titles for every work by each author of the current edition.\n Returns the first match found, or None.\n\n :param dict e: An OL edition suitable for saving, has a key, and has full Authors with keys\n but has not yet been saved.\n :rtype: None or str\n :return: the matched work key \"/works/OL..W\" if found\n \"\"\"\n\n norm_title = mk_norm(get_title(e))\n seen = set()\n for a in e['authors']:\n q = {'type': '/type/work', 'authors': {'author': {'key': a['key']}}}\n work_keys = list(web.ctx.site.things(q))\n for wkey in work_keys:\n w = web.ctx.site.get(wkey)\n if wkey in seen:\n continue\n seen.add(wkey)\n if not w.get('title'):\n continue\n if mk_norm(w['title']) == norm_title:\n assert w.type.key == '/type/work'\n return wkey\n\n\ndef build_author_reply(authors_in, edits, source):\n \"\"\"\n Steps through an import record's authors, and creates new records if new,\n adding them to 'edits' to be saved later.\n\n :param list authors_in: import author dicts [{\"name:\" \"Bob\"}, ...], maybe dates\n :param list edits: list of Things to be saved later. Is modified by this method.\n :param str source: Source record e.g. 
marc:marc_ex/part01.dat:26456929:680\n :rtype: tuple\n :return: (list, list) authors [{\"key\": \"/author/OL..A\"}, ...], author_reply\n \"\"\"\n\n authors = []\n author_reply = []\n for a in authors_in:\n new_author = 'key' not in a\n if new_author:\n a['key'] = web.ctx.site.new_key('/type/author')\n a['source_records'] = [source]\n edits.append(a)\n authors.append({'key': a['key']})\n author_reply.append(\n {\n 'key': a['key'],\n 'name': a['name'],\n 'status': ('created' if new_author else 'matched'),\n }\n )\n return (authors, author_reply)\n\n\ndef new_work(edition, rec, cover_id=None):\n \"\"\"\n :param dict edition: New OL Edition\n :param dict rec: Edition import data\n :param (int|None) cover_id: cover id\n :rtype: dict\n :return: a work to save\n \"\"\"\n w = {\n 'type': {'key': '/type/work'},\n 'title': get_title(rec),\n }\n for s in subject_fields:\n if s in rec:\n w[s] = rec[s]\n\n if 'authors' in edition:\n w['authors'] = [\n {'type': {'key': '/type/author_role'}, 'author': akey}\n for akey in edition['authors']\n ]\n\n if 'description' in rec:\n w['description'] = {'type': '/type/text', 'value': rec['description']}\n\n wkey = web.ctx.site.new_key('/type/work')\n if edition.get('covers'):\n w['covers'] = edition['covers']\n w['key'] = wkey\n return w\n\n\ndef add_cover(cover_url, ekey, account_key=None):\n \"\"\"\n Adds a cover to coverstore and returns the cover id.\n\n :param str cover_url: URL of cover image\n :param str ekey: Edition key /book/OL..M\n :rtype: int or None\n :return: Cover id, or None if upload did not succeed\n \"\"\"\n olid = ekey.split('/')[-1]\n coverstore_url = config.get('coverstore_url').rstrip('/')\n upload_url = coverstore_url + '/b/upload2'\n if upload_url.startswith('//'):\n upload_url = '{}:{}'.format(web.ctx.get('protocol', 'http'), upload_url)\n if not account_key:\n user = accounts.get_current_user()\n if not user:\n raise RuntimeError(\"accounts.get_current_user() failed\")\n account_key = user.get('key') or user.get('_key')\n params = {\n 'author': account_key,\n 'data': None,\n 'source_url': cover_url,\n 'olid': olid,\n 'ip': web.ctx.ip,\n }\n reply = None\n for attempt in range(10):\n try:\n payload = requests.compat.urlencode(params).encode('utf-8')\n response = requests.post(upload_url, data=payload)\n except requests.HTTPError:\n sleep(2)\n continue\n body = response.text\n if response.status_code == 500:\n raise CoverNotSaved(body)\n if body not in ['', 'None']:\n reply = response.json()\n if response.status_code == 200 and 'id' in reply:\n break\n sleep(2)\n if not reply or reply.get('message') == 'Invalid URL':\n return\n cover_id = int(reply['id'])\n return cover_id\n\n\ndef get_ia_item(ocaid):\n import internetarchive as ia\n\n cfg = {'general': {'secure': False}}\n item = ia.get_item(ocaid, config=cfg)\n return item\n\n\ndef modify_ia_item(item, data):\n access_key = (\n lending.config_ia_ol_metadata_write_s3\n and lending.config_ia_ol_metadata_write_s3['s3_key']\n )\n secret_key = (\n lending.config_ia_ol_metadata_write_s3\n and lending.config_ia_ol_metadata_write_s3['s3_secret']\n )\n return item.modify_metadata(data, access_key=access_key, secret_key=secret_key)\n\n\ndef create_ol_subjects_for_ocaid(ocaid, subjects):\n item = get_ia_item(ocaid)\n openlibrary_subjects = copy(item.metadata.get('openlibrary_subject')) or []\n\n if not isinstance(openlibrary_subjects, list):\n openlibrary_subjects = [openlibrary_subjects]\n\n for subject in subjects:\n if subject not in openlibrary_subjects:\n 
openlibrary_subjects.append(subject)\n\n r = modify_ia_item(item, {'openlibrary_subject': openlibrary_subjects})\n if r.status_code != 200:\n return f'{item.identifier} failed: {r.content}'\n else:\n return \"success for %s\" % item.identifier\n\n\ndef update_ia_metadata_for_ol_edition(edition_id):\n \"\"\"\n Writes the Open Library Edition and Work id to a linked\n archive.org item.\n\n :param str edition_id: of the form OL..M\n :rtype: dict\n :return: error report, or modified archive.org metadata on success\n \"\"\"\n\n data = {'error': 'No qualifying edition'}\n if edition_id:\n ed = web.ctx.site.get('/books/%s' % edition_id)\n if ed.ocaid:\n work = ed.works[0] if ed.get('works') else None\n if work and work.key:\n item = get_ia_item(ed.ocaid)\n work_id = work.key.split('/')[2]\n r = modify_ia_item(\n item,\n {'openlibrary_work': work_id, 'openlibrary_edition': edition_id},\n )\n if r.status_code != 200:\n data = {'error': f'{item.identifier} failed: {r.content}'}\n else:\n data = item.metadata\n return data\n\n\ndef normalize_record_bibids(rec):\n \"\"\"\n Returns the Edition import record with all ISBN fields and LCCNs cleaned.\n\n :param dict rec: Edition import record\n :rtype: dict\n :return: A record with cleaned LCCNs, and ISBNs in the various possible ISBN locations.\n \"\"\"\n for field in ('isbn_13', 'isbn_10', 'isbn'):\n if rec.get(field):\n rec[field] = [\n normalize_isbn(isbn) for isbn in rec.get(field) if normalize_isbn(isbn)\n ]\n if rec.get('lccn'):\n rec['lccn'] = [\n normalize_lccn(lccn) for lccn in rec.get('lccn') if normalize_lccn(lccn)\n ]\n return rec\n\n\ndef isbns_from_record(rec):\n \"\"\"\n Returns a list of all isbns from the various possible isbn fields.\n\n :param dict rec: Edition import record\n :rtype: list\n \"\"\"\n isbns = rec.get('isbn', []) + rec.get('isbn_10', []) + rec.get('isbn_13', [])\n return isbns\n\n\ndef build_pool(rec):\n \"\"\"\n Searches for existing edition matches on title and bibliographic keys.\n\n :param dict rec: Edition record\n :rtype: dict\n :return: {<identifier: title | isbn | lccn etc>: [list of /books/OL..M keys that match rec on <identifier>]}\n \"\"\"\n pool = defaultdict(set)\n match_fields = ('title', 'oclc_numbers', 'lccn', 'ocaid')\n\n # Find records with matching fields\n for field in match_fields:\n pool[field] = set(editions_matched(rec, field))\n\n # update title pool with normalized title matches\n pool['title'].update(\n set(editions_matched(rec, 'normalized_title_', normalize(rec['title'])))\n )\n\n # Find records with matching ISBNs\n if isbns := isbns_from_record(rec):\n pool['isbn'] = set(editions_matched(rec, 'isbn_', isbns))\n\n return {k: list(v) for k, v in pool.items() if v}\n\n\ndef find_quick_match(rec):\n \"\"\"\n Attempts to quickly find an existing item match using bibliographic keys.\n\n :param dict rec: Edition record\n :rtype: str|bool\n :return: First key matched of format \"/books/OL..M\" or False if no match found.\n \"\"\"\n\n if 'openlibrary' in rec:\n return '/books/' + rec['openlibrary']\n\n ekeys = editions_matched(rec, 'ocaid')\n if ekeys:\n return ekeys[0]\n\n if isbns := isbns_from_record(rec):\n ekeys = editions_matched(rec, 'isbn_', isbns)\n if ekeys:\n return ekeys[0]\n\n # only searches for the first value from these lists\n for f in 'source_records', 'oclc_numbers', 'lccn':\n if rec.get(f):\n if f == 'source_records' and not rec[f][0].startswith('ia:'):\n continue\n ekeys = editions_matched(rec, f, rec[f][0])\n if ekeys:\n return ekeys[0]\n return False\n\n\ndef 
editions_matched(rec, key, value=None):\n \"\"\"\n Search OL for editions matching record's 'key' value.\n\n :param dict rec: Edition import record\n :param str key: Key to search on, e.g. 'isbn_'\n :param list|str value: Value or Values to use, overriding record values\n :rtpye: list\n :return: List of edition keys [\"/books/OL..M\",]\n \"\"\"\n if value is None and key not in rec:\n return []\n\n if value is None:\n value = rec[key]\n q = {'type': '/type/edition', key: value}\n ekeys = list(web.ctx.site.things(q))\n return ekeys\n\n\ndef find_exact_match(rec, edition_pool):\n \"\"\"\n Returns an edition key match for rec from edition_pool\n Only returns a key if all values match?\n\n :param dict rec: Edition import record\n :param dict edition_pool:\n :rtype: str|bool\n :return: edition key\n \"\"\"\n seen = set()\n for editions in edition_pool.values():\n for ekey in editions:\n if ekey in seen:\n continue\n seen.add(ekey)\n existing = web.ctx.site.get(ekey)\n match = True\n for k, v in rec.items():\n if k == 'source_records':\n continue\n existing_value = existing.get(k)\n if not existing_value:\n continue\n if k == 'languages':\n existing_value = [\n str(re_lang.match(lang.key).group(1)) for lang in existing_value\n ]\n if k == 'authors':\n existing_value = [dict(a) for a in existing_value]\n for a in existing_value:\n del a['type']\n del a['key']\n for a in v:\n if 'entity_type' in a:\n del a['entity_type']\n if 'db_name' in a:\n del a['db_name']\n\n if existing_value != v:\n match = False\n break\n if match:\n return ekey\n return False\n\n\ndef find_enriched_match(rec, edition_pool):\n \"\"\"\n Find the best match for rec in edition_pool and return its key.\n :param dict rec: the new edition we are trying to match.\n :param list edition_pool: list of possible edition key matches, output of build_pool(import record)\n :rtype: str|None\n :return: None or the edition key '/books/OL...M' of the best edition match for enriched_rec in edition_pool\n \"\"\"\n enriched_rec = expand_record(rec)\n add_db_name(enriched_rec)\n\n seen = set()\n for edition_keys in edition_pool.values():\n for edition_key in edition_keys:\n if edition_key in seen:\n continue\n thing = None\n found = True\n while not thing or is_redirect(thing):\n seen.add(edition_key)\n thing = web.ctx.site.get(edition_key)\n if thing is None:\n found = False\n break\n if is_redirect(thing):\n edition_key = thing['location']\n # FIXME: this updates edition_key, but leaves thing as redirect,\n # which will raise an exception in editions_match()\n if not found:\n continue\n if editions_match(enriched_rec, thing):\n return edition_key\n\n\ndef add_db_name(rec: dict) -> None:\n \"\"\"\n db_name = Author name followed by dates.\n adds 'db_name' in place for each author.\n \"\"\"\n if 'authors' not in rec:\n return\n\n for a in rec['authors'] or []:\n date = None\n if 'date' in a:\n assert 'birth_date' not in a\n assert 'death_date' not in a\n date = a['date']\n elif 'birth_date' in a or 'death_date' in a:\n date = a.get('birth_date', '') + '-' + a.get('death_date', '')\n a['db_name'] = ' '.join([a['name'], date]) if date else a['name']\n\n\ndef load_data(rec, account_key=None):\n \"\"\"\n Adds a new Edition to Open Library. 
Checks for existing Works.\n Creates a new Work, and Author, if required,\n otherwise associates the new Edition with the existing Work.\n\n :param dict rec: Edition record to add (no further checks at this point)\n :rtype: dict\n :return:\n {\n \"success\": False,\n \"error\": <error msg>\n }\n OR\n {\n \"success\": True,\n \"work\": {\"key\": <key>, \"status\": \"created\" | \"modified\" | \"matched\"},\n \"edition\": {\"key\": <key>, \"status\": \"created\"},\n \"authors\": [{\"status\": \"matched\", \"name\": \"John Smith\", \"key\": <key>}, ...]\n }\n \"\"\"\n\n cover_url = None\n if 'cover' in rec:\n cover_url = rec['cover']\n del rec['cover']\n try:\n # get an OL style edition dict\n edition = build_query(rec)\n except InvalidLanguage as e:\n return {\n 'success': False,\n 'error': str(e),\n }\n\n ekey = web.ctx.site.new_key('/type/edition')\n cover_id = None\n if cover_url:\n cover_id = add_cover(cover_url, ekey, account_key=account_key)\n if cover_id:\n edition['covers'] = [cover_id]\n\n edits = [] # Things (Edition, Work, Authors) to be saved\n reply = {}\n # TOFIX: edition.authors has already been processed by import_authors() in build_query(), following line is a NOP?\n author_in = [\n import_author(a, eastern=east_in_by_statement(rec, a))\n for a in edition.get('authors', [])\n ]\n # build_author_reply() adds authors to edits\n (authors, author_reply) = build_author_reply(\n author_in, edits, rec['source_records'][0]\n )\n\n if authors:\n edition['authors'] = authors\n reply['authors'] = author_reply\n\n wkey = None\n work_state = 'created'\n # Look for an existing work\n if 'authors' in edition:\n wkey = find_matching_work(edition)\n if wkey:\n w = web.ctx.site.get(wkey)\n work_state = 'matched'\n found_wkey_match = True\n need_update = False\n for k in subject_fields:\n if k not in rec:\n continue\n for s in rec[k]:\n if normalize(s) not in [\n normalize(existing) for existing in w.get(k, [])\n ]:\n w.setdefault(k, []).append(s)\n need_update = True\n if cover_id:\n w.setdefault('covers', []).append(cover_id)\n need_update = True\n if need_update:\n work_state = 'modified'\n edits.append(w.dict())\n else:\n # Create new work\n w = new_work(edition, rec, cover_id)\n wkey = w['key']\n edits.append(w)\n\n assert wkey\n edition['works'] = [{'key': wkey}]\n edition['key'] = ekey\n edits.append(edition)\n\n web.ctx.site.save_many(edits, comment='import new book', action='add-book')\n\n # Writes back `openlibrary_edition` and `openlibrary_work` to\n # archive.org item after successful import:\n if 'ocaid' in rec:\n update_ia_metadata_for_ol_edition(ekey.split('/')[-1])\n\n reply['success'] = True\n reply['edition'] = {'key': ekey, 'status': 'created'}\n reply['work'] = {'key': wkey, 'status': work_state}\n return reply\n\n\ndef normalize_import_record(rec: dict) -> None:\n \"\"\"\n Normalize the import record by:\n - Verifying required fields\n - Ensuring source_records is a list\n - Splitting subtitles out of the title field\n - Cleaning all ISBN and LCCN fields ('bibids'), and\n - Deduplicate authors.\n\n NOTE: This function modifies the passed-in rec in place.\n \"\"\"\n required_fields = [\n 'title',\n 'source_records',\n ] # ['authors', 'publishers', 'publish_date']\n for field in required_fields:\n if not rec.get(field):\n raise RequiredField(field)\n\n # Ensure source_records is a list.\n if not isinstance(rec['source_records'], list):\n rec['source_records'] = [rec['source_records']]\n\n # Split subtitle if required and not already present\n if ':' in rec.get('title', '') 
and not rec.get('subtitle'):\n title, subtitle = split_subtitle(rec.get('title'))\n if subtitle:\n rec['title'] = title\n rec['subtitle'] = subtitle\n\n rec = normalize_record_bibids(rec)\n\n # deduplicate authors\n rec['authors'] = uniq(rec.get('authors', []), dicthash)\n\n\ndef validate_record(rec: dict) -> None:\n \"\"\"\n Check for:\n - publication years too old from non-exempt sources (e.g. Amazon);\n - publish dates in a future year;\n - independently published books; and\n - books that need an ISBN and lack one.\n\n Each check raises an error or returns None.\n\n If all the validations pass, implicitly return None.\n \"\"\"\n # Only validate publication year if a year is found.\n if publication_year := get_publication_year(rec.get('publish_date')):\n if publication_too_old_and_not_exempt(rec):\n raise PublicationYearTooOld(publication_year)\n elif published_in_future_year(publication_year):\n raise PublishedInFutureYear(publication_year)\n\n if is_independently_published(rec.get('publishers', [])):\n raise IndependentlyPublished\n\n if needs_isbn_and_lacks_one(rec):\n raise SourceNeedsISBN\n\n\ndef find_match(rec, edition_pool) -> str | None:\n \"\"\"Use rec to try to find an existing edition key that matches.\"\"\"\n match = find_quick_match(rec)\n if not match:\n match = find_exact_match(rec, edition_pool)\n\n if not match:\n # Add 'full_title' to the rec by conjoining 'title' and 'subtitle'.\n # expand_record() uses this for matching.\n rec['full_title'] = rec['title']\n if subtitle := rec.get('subtitle'):\n rec['full_title'] += ' ' + subtitle\n\n match = find_enriched_match(rec, edition_pool)\n\n return match\n\n\ndef update_edition_with_rec_data(\n rec: dict, account_key: str | None, edition: \"Edition\"\n) -> bool:\n \"\"\"\n Enrich the Edition by adding certain fields present in rec but absent\n in edition.\n\n NOTE: This modifies the passed-in Edition in place.\n \"\"\"\n need_edition_save = False\n # Add cover to edition\n if 'cover' in rec and not edition.get_covers():\n cover_url = rec['cover']\n cover_id = add_cover(cover_url, edition.key, account_key=account_key)\n if cover_id:\n edition['covers'] = [cover_id]\n need_edition_save = True\n\n # Add ocaid to edition (str), if needed\n if 'ocaid' in rec and not edition.ocaid:\n edition['ocaid'] = rec['ocaid']\n need_edition_save = True\n\n # Add list fields to edition as needed\n edition_list_fields = [\n 'local_id',\n 'lccn',\n 'lc_classifications',\n 'oclc_numbers',\n 'source_records',\n ]\n for f in edition_list_fields:\n if f not in rec or not rec[f]:\n continue\n # ensure values is a list\n values = rec[f] if isinstance(rec[f], list) else [rec[f]]\n if f in edition:\n # get values from rec that are not currently on the edition\n to_add = [v for v in values if v not in edition[f]]\n edition[f] += to_add\n else:\n edition[f] = to_add = values\n if to_add:\n need_edition_save = True\n\n other_edition_fields = [\n 'description',\n 'number_of_pages',\n 'publishers',\n 'publish_date',\n ]\n for f in other_edition_fields:\n if f not in rec or not rec[f]:\n continue\n if f not in edition:\n edition[f] = rec[f]\n need_edition_save = True\n\n # Add new identifiers\n if 'identifiers' in rec:\n identifiers = defaultdict(list, edition.dict().get('identifiers', {}))\n for k, vals in rec['identifiers'].items():\n identifiers[k].extend(vals)\n identifiers[k] = list(set(identifiers[k]))\n if edition.dict().get('identifiers') != identifiers:\n edition['identifiers'] = identifiers\n need_edition_save = True\n\n return 
need_edition_save\n\n\ndef update_work_with_rec_data(\n rec: dict, edition: \"Edition\", work: dict[str, Any], need_work_save: bool\n) -> bool:\n \"\"\"\n Enrich the Work by adding certain fields present in rec but absent\n in work.\n\n NOTE: This modifies the passed-in Work in place.\n \"\"\"\n # Add subjects to work, if not already present\n if 'subjects' in rec:\n work_subjects = list(work.get('subjects', []))\n for s in rec['subjects']:\n if s not in work_subjects:\n work_subjects.append(s)\n need_work_save = True\n if need_work_save and work_subjects:\n work['subjects'] = work_subjects\n\n # Add cover to work, if needed\n if not work.get('covers') and edition.get_covers():\n work['covers'] = [edition['covers'][0]]\n need_work_save = True\n\n # Add description to work, if needed\n if not work.get('description') and edition.get('description'):\n work['description'] = edition['description']\n need_work_save = True\n\n # Add authors to work, if needed\n if not work.get('authors'):\n authors = [import_author(a) for a in rec.get('authors', [])]\n work['authors'] = [\n {'type': {'key': '/type/author_role'}, 'author': a.key}\n for a in authors\n if a.get('key')\n ]\n if work.get('authors'):\n need_work_save = True\n\n return need_work_save\n\n\ndef load(rec, account_key=None):\n \"\"\"Given a record, tries to add/match that edition in the system.\n\n Record is a dictionary containing all the metadata of the edition.\n The following fields are mandatory:\n\n * title: str\n * source_records: list\n\n :param dict rec: Edition record to add\n :rtype: dict\n :return: a dict to be converted into a JSON HTTP response, same as load_data()\n \"\"\"\n if not is_promise_item(rec):\n validate_record(rec)\n normalize_import_record(rec)\n\n # Resolve an edition if possible, or create and return one if not.\n\n edition_pool = build_pool(rec)\n if not edition_pool:\n # No match candidates found, add edition\n return load_data(rec, account_key=account_key)\n\n match = find_match(rec, edition_pool)\n if not match:\n # No match found, add edition\n return load_data(rec, account_key=account_key)\n\n # We have an edition match at this point\n need_work_save = need_edition_save = False\n work: dict[str, Any]\n edition: Edition = web.ctx.site.get(match)\n # check for, and resolve, author redirects\n for a in edition.authors:\n while is_redirect(a):\n if a in edition.authors:\n edition.authors.remove(a)\n a = web.ctx.site.get(a.location)\n if not is_redirect(a):\n edition.authors.append(a)\n\n if edition.get('works'):\n work = edition.works[0].dict()\n work_created = False\n else:\n # Found an edition without a work\n work_created = need_work_save = need_edition_save = True\n work = new_work(edition.dict(), rec)\n edition.works = [{'key': work['key']}]\n\n need_edition_save = update_edition_with_rec_data(\n rec=rec, account_key=account_key, edition=edition\n )\n need_work_save = update_work_with_rec_data(\n rec=rec, edition=edition, work=work, need_work_save=need_work_save\n )\n\n edits = []\n reply = {\n 'success': True,\n 'edition': {'key': match, 'status': 'matched'},\n 'work': {'key': work['key'], 'status': 'matched'},\n }\n if need_edition_save:\n reply['edition']['status'] = 'modified'\n edits.append(edition.dict())\n if need_work_save:\n reply['work']['status'] = 'created' if work_created else 'modified'\n edits.append(work)\n if edits:\n web.ctx.site.save_many(\n edits, comment='import existing book', action='edit-book'\n )\n if 'ocaid' in rec:\n update_ia_metadata_for_ol_edition(match.split('/')[-1])\n 
return reply\n", "path": "openlibrary/catalog/add_book/__init__.py" } ]
diff --git a/openlibrary/catalog/add_book/__init__.py b/openlibrary/catalog/add_book/__init__.py index 58e1ae04296..afd3c8c37a2 100644 --- a/openlibrary/catalog/add_book/__init__.py +++ b/openlibrary/catalog/add_book/__init__.py @@ -851,6 +851,7 @@ def update_edition_with_rec_data( need_edition_save = True other_edition_fields = [ + 'description', 'number_of_pages', 'publishers', 'publish_date', diff --git a/openlibrary/catalog/add_book/tests/test_add_book.py b/openlibrary/catalog/add_book/tests/test_add_book.py index 155d768ad94..0964b1a48a9 100644 --- a/openlibrary/catalog/add_book/tests/test_add_book.py +++ b/openlibrary/catalog/add_book/tests/test_add_book.py @@ -1265,3 +1265,53 @@ def test_validate_record(name, rec, error) -> None: validate_record(rec) else: assert validate_record(rec) is None, f"Test failed: {name}" # type: ignore [func-returns-value] + + +def test_reimport_updates_edition_and_work_description(mock_site) -> None: + author = { + 'type': {'key': '/type/author'}, + 'name': 'John Smith', + 'key': '/authors/OL1A', + } + + existing_work = { + 'authors': [{'author': '/authors/OL1A', 'type': {'key': '/type/author_role'}}], + 'key': '/works/OL1W', + 'title': 'A Good Book', + 'type': {'key': '/type/work'}, + } + + existing_edition = { + 'key': '/books/OL1M', + 'title': 'A Good Book', + 'publishers': ['Black Spot'], + 'type': {'key': '/type/edition'}, + 'source_records': ['ia:someocaid'], + 'publish_date': 'Jan 09, 2011', + 'isbn_10': ['1234567890'], + 'works': [{'key': '/works/OL1W'}], + } + + mock_site.save(author) + mock_site.save(existing_work) + mock_site.save(existing_edition) + + rec = { + 'source_records': 'ia:someocaid', + 'title': 'A Good Book', + 'authors': [{'name': 'John Smith'}], + 'publishers': ['Black Spot'], + 'publish_date': 'Jan 09, 2011', + 'isbn_10': ['1234567890'], + 'description': 'A genuinely enjoyable read.', + } + + reply = load(rec) + assert reply['success'] is True + assert reply['edition']['status'] == 'modified' + assert reply['work']['status'] == 'modified' + assert reply['work']['key'] == '/works/OL1W' + edition = mock_site.get(reply['edition']['key']) + work = mock_site.get(reply['work']['key']) + assert edition.description == "A genuinely enjoyable read." + assert work.description == "A genuinely enjoyable read."
enthought__chaco-893
ValueError: Handler.init() must return True or False, but instead returned None. when running chaco/chaco/examples/demo/basic/image_from_file.py **Problem Description** When running chaco/chaco/examples/demo/basic/image_from_file.py demo, ValueError: Handler.init() must return True or False, but instead returned None. is raised **Reproduction Steps:** ```python python chaco/chaco/examples/demo/basic/image_from_file.py ``` Link: https://github.com/enthought/chaco/blob/main/chaco/examples/demo/basic/image_from_file.py **Full Stacktrace:** ``` (py311) (base) cyliu@aus552cyliu Documents % python3.11 3.11_test/chaco/chaco/examples/demo/basic/image_from_file.py /Users/cyliu/Documents/3.11_test/chaco/chaco/examples/demo/basic/image_from_file.py:58: DeprecationWarning: find_resource is deprecated. Use importlib.resources instead. image_path = find_resource( 2023-05-02 17:38:36.706 Python[5455:122557] ApplePersistenceIgnoreState: Existing state will not be touched. New state will be written to /var/folders/2z/kylzj9s92y71cxscmljmpqrh0000gt/T/org.python.python.savedState Traceback (most recent call last): File "/Users/cyliu/Documents/3.11_test/chaco/chaco/examples/demo/basic/image_from_file.py", line 224, in <module> sys.exit(main()) ^^^^^^ File "/Users/cyliu/Documents/3.11_test/chaco/chaco/examples/demo/basic/image_from_file.py", line 218, in main view.configure_traits() File "/Users/cyliu/.venvs/py311/lib/python3.11/site-packages/traits/has_traits.py", line 2164, in configure_traits rc = toolkit().view_application( ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/qt/toolkit.py", line 237, in view_application return view_application.view_application( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/qt/view_application.py", line 92, in view_application return ViewApplication( ^^^^^^^^^^^^^^^^ File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/qt/view_application.py", line 127, in __init__ self.ui = self.view.ui( ^^^^^^^^^^^^^ File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/view.py", line 457, in ui ui.ui(parent, kind) File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/ui.py", line 234, in ui self.rebuild(self, parent) File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/qt/toolkit.py", line 168, in ui_live ui_live.ui_live(ui, parent) File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/qt/ui_live.py", line 52, in ui_live _ui_dialog(ui, parent, BaseDialog.NONMODAL) File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/qt/ui_live.py", line 72, in _ui_dialog BaseDialog.display_ui(ui, parent, style) File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/qt/ui_base.py", line 299, in display_ui ui.prepare_ui() File "/Users/cyliu/Documents/3.11_test/traitsui/traitsui/ui.py", line 404, in prepare_ui raise ValueError( ValueError: Handler.init() must return True or False, but instead returned None. 
``` **OS, Python version:** [MacOS, python3.11] ``` Package Version Editable project location ------------------ ----------- ----------------------------------------- apptools 5.2.0 attrdict 2.0.1 attrdict3 2.0.2 blosc2 2.0.0 celiagg 2.1.3 certifi 2022.12.7 chaco 5.1.0 /Users/cyliu/Documents/3.11_test/chaco charset-normalizer 3.1.0 configobj 5.0.8 cPython 0.0.6 Cython 0.29.34 dnspython 2.3.0 enable 5.4.0.dev31 /Users/cyliu/Documents/3.11_test/enable fonttools 4.39.2 h5py 3.8.0 idna 3.4 joblib 1.2.0 kiwisolver 1.4.4 msgpack 1.0.5 numexpr 2.8.4 numpy 1.24.2 packaging 23.1 pandas 1.5.3 Pillow 9.4.0 pip 23.1 py-cpuinfo 9.0.0 PyAudio 0.2.13 pyface 8.0.0 /Users/cyliu/Documents/3.11_test/pyface pygarrayimage 1.0 pyglet 2.0.5 /Users/cyliu/Documents/3.11_test/pyglet Pygments 2.14.0 pymongo 4.3.3 pyparsing 3.0.9 PySide6 6.4.3 PySide6-Addons 6.4.3 PySide6-Essentials 6.4.3 python-dateutil 2.8.2 pythonw 3.0.3 pytz 2023.2 reportlab 3.6.12 requests 2.28.2 scikit-learn 1.2.2 scipy 1.10.1 setuptools 65.6.3 shiboken6 6.4.3 six 1.16.0 tables 3.8.0 threadpoolctl 3.1.0 traits 6.4.1 traitsui 8.0.0.dev0 /Users/cyliu/Documents/3.11_test/traitsui urllib3 1.26.15 wxPython 4.2.0 ```
[ { "content": "#!/usr/bin/env python\n\"\"\"\nLoads and saves RGB images from disk\n - Left-drag pans the plot.\n - Mousewheel up and down zooms the plot in and out.\n - Pressing \"z\" brings up the Zoom Box, and you can click-drag a rectangular\n region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow\n and alt-right-arrow moves you forwards and backwards through the \"zoom\n history\".\n\"\"\"\n\n# Standard library imports\nimport os, sys\n\n# Major library imports\n\n# Enthought library imports\nfrom traits.util.resource import find_resource\nfrom traits.api import File, HasTraits, Instance\nfrom traitsui.api import (\n Action,\n CloseAction,\n Handler,\n Item,\n Menu,\n MenuBar,\n OKCancelButtons,\n Separator,\n View,\n)\n\n# Chaco imports\nfrom chaco.api import ArrayPlotData, ImageData, Plot, PlotGraphicsContext\nfrom enable.api import ComponentEditor\nfrom chaco.tools.api import PanTool, ZoomTool\n\n\n# -------------------------------------------------------------------------------\n# Class 'DemoView'\n# -------------------------------------------------------------------------------\n\n\nclass DemoView(HasTraits):\n\n ### Public Traits ##########################################################\n\n # A Plot Data object to hold our image data\n pd = Instance(ArrayPlotData, ())\n\n # A Plot object to plot our image data\n plot = Instance(Plot)\n\n ### Private Traits #########################################################\n\n # File name to load image from\n resource_path = os.path.join(\"examples\", \"basic\", \"capitol.jpg\")\n alt_path = \"capitol.jpg\"\n image_path = find_resource(\n \"Chaco\", resource_path, alt_path=alt_path, return_path=True\n )\n _load_file = File(image_path)\n\n # File name to save image to\n _save_file = File\n\n ### Traits Views ###########################################################\n\n # This view is for a file dialog to select the 'load' filename\n load_file_view = View(\n Item(\"_load_file\"),\n buttons=OKCancelButtons,\n kind=\"livemodal\", # NB must use livemodal, plot objects don't copy well\n width=400,\n resizable=True,\n )\n\n # This view is for a file dialog to select the 'save' filename\n save_file_view = View(\n Item(\"_save_file\"),\n buttons=OKCancelButtons,\n kind=\"livemodal\", # NB must use livemodal, plot objects don't copy well\n width=400,\n resizable=True,\n )\n\n # ---------------------------------------------------------------------------\n # Public 'DemoView' interface\n # ---------------------------------------------------------------------------\n\n def default_traits_view(self):\n \"\"\"Returns the default view to use for this class.\"\"\"\n # NOTE: I moved the view to this method so we can declare a handler\n # for the view. 
Alternatively, we could move the DemoController class\n # to the top and declare view=Instance(HasTraits) instead.\n traits_view = View(\n Item(\n \"plot\",\n editor=ComponentEditor(),\n show_label=False,\n ),\n menubar=MenuBar(\n Menu(\n Action(\n name=\"Save Plot\", action=\"save\"\n ), # see Controller for\n Action(name=\"Load Plot\", action=\"load\"), # these callbacks\n Separator(),\n CloseAction,\n name=\"File\",\n ),\n ),\n width=600,\n height=600,\n resizable=True,\n handler=DemoController,\n )\n return traits_view\n\n # ---------------------------------------------------------------------------\n # Private 'DemoView' interface\n # ---------------------------------------------------------------------------\n\n def _plot_default(self):\n # Create the plot object, set some options, and add some tools\n plot = Plot(self.pd, default_origin=\"top left\")\n plot.x_axis.orientation = \"top\"\n plot.padding = 50\n plot.padding_top = 75\n plot.tools.append(PanTool(plot))\n zoom = ZoomTool(component=plot, tool_mode=\"box\", always_on=False)\n plot.overlays.append(zoom)\n\n # Load the default image\n self._load(plot)\n\n # Plot the image plot with this image\n plot.img_plot(\"imagedata\")\n\n return plot\n\n def _save(self):\n # Create a graphics context of the right size\n win_size = self.plot.outer_bounds\n plot_gc = PlotGraphicsContext(win_size)\n\n # Have the plot component into it\n plot_gc.render_component(self.plot)\n\n # Save out to the user supplied filename\n plot_gc.save(self._save_file)\n\n def _load(self, plot=None):\n if plot is None:\n plot = self.plot\n # Load the image with the user supplied filename\n image = ImageData.fromfile(self._load_file)\n\n # Update the plot data. NB we must extract _data from the image\n # for the time being, until ImageData is made more friendly\n self.pd.set_data(\"imagedata\", image._data)\n\n # Set the title and redraw\n plot.title = os.path.basename(self._load_file)\n plot.request_redraw()\n\n\n# -------------------------------------------------------------------------------\n# Class 'DemoController'\n# -------------------------------------------------------------------------------\n\n\nclass DemoController(Handler):\n\n # The HasTraits object we are a controller for\n view = Instance(DemoView)\n\n # ---------------------------------------------------------------------------\n # Public 'DemoController' interface\n # ---------------------------------------------------------------------------\n\n def init(self, info):\n \"\"\"Initializes the controls of a user interface.\n Overridden here to assign the 'view' trait.\n \"\"\"\n self.view = info.object\n\n def save(self, ui_info):\n \"\"\"\n Callback for the 'Save Image' menu option.\n \"\"\"\n ui = self.view.edit_traits(view=\"save_file_view\")\n if ui.result == True:\n self.view._save()\n\n def load(self, ui_info):\n \"\"\"\n Callback for the 'Load Image' menu option.\n \"\"\"\n ui = self.view.edit_traits(view=\"load_file_view\")\n if ui.result == True:\n self.view._load()\n\n\n# ===============================================================================\n# # popup object that is used by the demo.py application.\n# ===============================================================================\n# Note: we declare a 'popup' rather than a 'demo' since the menubar doesn't seem\n# to show up in a 'panel' mode.\npopup = DemoView()\n\n# -------------------------------------------------------------------------------\n# Function 'main'\n# 
-------------------------------------------------------------------------------\n\n\ndef main(argv=None):\n view = DemoView()\n view.configure_traits()\n\n\n# -------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "chaco/examples/demo/basic/image_from_file.py" } ]
[ { "content": "#!/usr/bin/env python\n\"\"\"\nLoads and saves RGB images from disk\n - Left-drag pans the plot.\n - Mousewheel up and down zooms the plot in and out.\n - Pressing \"z\" brings up the Zoom Box, and you can click-drag a rectangular\n region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow\n and alt-right-arrow moves you forwards and backwards through the \"zoom\n history\".\n\"\"\"\n\n# Standard library imports\nimport os, sys\n\n# Major library imports\n\n# Enthought library imports\nfrom traits.util.resource import find_resource\nfrom traits.api import File, HasTraits, Instance\nfrom traitsui.api import (\n Action,\n CloseAction,\n Handler,\n Item,\n Menu,\n MenuBar,\n OKCancelButtons,\n Separator,\n View,\n)\n\n# Chaco imports\nfrom chaco.api import ArrayPlotData, ImageData, Plot, PlotGraphicsContext\nfrom enable.api import ComponentEditor\nfrom chaco.tools.api import PanTool, ZoomTool\n\n\n# -------------------------------------------------------------------------------\n# Class 'DemoView'\n# -------------------------------------------------------------------------------\n\n\nclass DemoView(HasTraits):\n\n ### Public Traits ##########################################################\n\n # A Plot Data object to hold our image data\n pd = Instance(ArrayPlotData, ())\n\n # A Plot object to plot our image data\n plot = Instance(Plot)\n\n ### Private Traits #########################################################\n\n # File name to load image from\n resource_path = os.path.join(\"examples\", \"basic\", \"capitol.jpg\")\n alt_path = \"capitol.jpg\"\n image_path = find_resource(\n \"Chaco\", resource_path, alt_path=alt_path, return_path=True\n )\n _load_file = File(image_path)\n\n # File name to save image to\n _save_file = File\n\n ### Traits Views ###########################################################\n\n # This view is for a file dialog to select the 'load' filename\n load_file_view = View(\n Item(\"_load_file\"),\n buttons=OKCancelButtons,\n kind=\"livemodal\", # NB must use livemodal, plot objects don't copy well\n width=400,\n resizable=True,\n )\n\n # This view is for a file dialog to select the 'save' filename\n save_file_view = View(\n Item(\"_save_file\"),\n buttons=OKCancelButtons,\n kind=\"livemodal\", # NB must use livemodal, plot objects don't copy well\n width=400,\n resizable=True,\n )\n\n # ---------------------------------------------------------------------------\n # Public 'DemoView' interface\n # ---------------------------------------------------------------------------\n\n def default_traits_view(self):\n \"\"\"Returns the default view to use for this class.\"\"\"\n # NOTE: I moved the view to this method so we can declare a handler\n # for the view. 
Alternatively, we could move the DemoController class\n # to the top and declare view=Instance(HasTraits) instead.\n traits_view = View(\n Item(\n \"plot\",\n editor=ComponentEditor(),\n show_label=False,\n ),\n menubar=MenuBar(\n Menu(\n Action(\n name=\"Save Plot\", action=\"save\"\n ), # see Controller for\n Action(name=\"Load Plot\", action=\"load\"), # these callbacks\n Separator(),\n CloseAction,\n name=\"File\",\n ),\n ),\n width=600,\n height=600,\n resizable=True,\n handler=DemoController,\n )\n return traits_view\n\n # ---------------------------------------------------------------------------\n # Private 'DemoView' interface\n # ---------------------------------------------------------------------------\n\n def _plot_default(self):\n # Create the plot object, set some options, and add some tools\n plot = Plot(self.pd, default_origin=\"top left\")\n plot.x_axis.orientation = \"top\"\n plot.padding = 50\n plot.padding_top = 75\n plot.tools.append(PanTool(plot))\n zoom = ZoomTool(component=plot, tool_mode=\"box\", always_on=False)\n plot.overlays.append(zoom)\n\n # Load the default image\n self._load(plot)\n\n # Plot the image plot with this image\n plot.img_plot(\"imagedata\")\n\n return plot\n\n def _save(self):\n # Create a graphics context of the right size\n win_size = self.plot.outer_bounds\n plot_gc = PlotGraphicsContext(win_size)\n\n # Have the plot component into it\n plot_gc.render_component(self.plot)\n\n # Save out to the user supplied filename\n plot_gc.save(self._save_file)\n\n def _load(self, plot=None):\n if plot is None:\n plot = self.plot\n # Load the image with the user supplied filename\n image = ImageData.fromfile(self._load_file)\n\n # Update the plot data. NB we must extract _data from the image\n # for the time being, until ImageData is made more friendly\n self.pd.set_data(\"imagedata\", image._data)\n\n # Set the title and redraw\n plot.title = os.path.basename(self._load_file)\n plot.request_redraw()\n\n\n# -------------------------------------------------------------------------------\n# Class 'DemoController'\n# -------------------------------------------------------------------------------\n\n\nclass DemoController(Handler):\n\n # The HasTraits object we are a controller for\n view = Instance(DemoView)\n\n # ---------------------------------------------------------------------------\n # Public 'DemoController' interface\n # ---------------------------------------------------------------------------\n\n def init(self, info):\n \"\"\"Initializes the controls of a user interface.\n Overridden here to assign the 'view' trait.\n \"\"\"\n self.view = info.object\n return True\n\n def save(self, ui_info):\n \"\"\"\n Callback for the 'Save Image' menu option.\n \"\"\"\n ui = self.view.edit_traits(view=\"save_file_view\")\n if ui.result == True:\n self.view._save()\n\n def load(self, ui_info):\n \"\"\"\n Callback for the 'Load Image' menu option.\n \"\"\"\n ui = self.view.edit_traits(view=\"load_file_view\")\n if ui.result == True:\n self.view._load()\n\n\n# ===============================================================================\n# # popup object that is used by the demo.py application.\n# ===============================================================================\n# Note: we declare a 'popup' rather than a 'demo' since the menubar doesn't seem\n# to show up in a 'panel' mode.\npopup = DemoView()\n\n# -------------------------------------------------------------------------------\n# Function 'main'\n# 
-------------------------------------------------------------------------------\n\n\ndef main(argv=None):\n view = DemoView()\n view.configure_traits()\n\n\n# -------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "chaco/examples/demo/basic/image_from_file.py" } ]
diff --git a/chaco/examples/demo/basic/image_from_file.py b/chaco/examples/demo/basic/image_from_file.py index fde4eded5..71c1f4523 100644 --- a/chaco/examples/demo/basic/image_from_file.py +++ b/chaco/examples/demo/basic/image_from_file.py @@ -183,6 +183,7 @@ def init(self, info): Overridden here to assign the 'view' trait. """ self.view = info.object + return True def save(self, ui_info): """
pytorch__ignite-1365
MyPy: improve ignite.base module ## 🚀 Feature Currently, mypy ignores all errors for all modules. We have to rework our typing such that mypy checks the code. In this issue, let's improve https://github.com/pytorch/ignite/tree/master/ignite/base module such that mypy passes on it. For Hacktoberfest contributors, feel free to ask questions for details if any and say that you would like to tackle the issue. Please, take a look at CONTRIBUTING guide. Improve typing for ignite.handlers module (1343) Fixes #1343 Description: Improves typing (when possible) for `ignite.handlers` module. Check list: * [x] New tests are added (if a new feature is added) * [ ] New doc strings: description and/or example code are in RST format * [ ] Documentation is updated (if required)
[ { "content": "from collections import OrderedDict\nfrom collections.abc import Mapping\n\n\nclass Serializable:\n\n _state_dict_all_req_keys = ()\n _state_dict_one_of_opt_keys = ()\n\n def state_dict(self) -> OrderedDict:\n pass\n\n def load_state_dict(self, state_dict: Mapping) -> None:\n if not isinstance(state_dict, Mapping):\n raise TypeError(\"Argument state_dict should be a dictionary, but given {}\".format(type(state_dict)))\n\n for k in self._state_dict_all_req_keys:\n if k not in state_dict:\n raise ValueError(\n \"Required state attribute '{}' is absent in provided state_dict '{}'\".format(k, state_dict.keys())\n )\n opts = [k in state_dict for k in self._state_dict_one_of_opt_keys]\n if len(opts) > 0 and ((not any(opts)) or (all(opts))):\n raise ValueError(\"state_dict should contain only one of '{}' keys\".format(self._state_dict_one_of_opt_keys))\n", "path": "ignite/base/mixins.py" } ]
[ { "content": "from collections import OrderedDict\nfrom collections.abc import Mapping\n\n\nclass Serializable:\n\n _state_dict_all_req_keys = () # type: tuple\n _state_dict_one_of_opt_keys = () # type: tuple\n\n def state_dict(self) -> OrderedDict:\n pass\n\n def load_state_dict(self, state_dict: Mapping) -> None:\n if not isinstance(state_dict, Mapping):\n raise TypeError(\"Argument state_dict should be a dictionary, but given {}\".format(type(state_dict)))\n\n for k in self._state_dict_all_req_keys:\n if k not in state_dict:\n raise ValueError(\n \"Required state attribute '{}' is absent in provided state_dict '{}'\".format(k, state_dict.keys())\n )\n opts = [k in state_dict for k in self._state_dict_one_of_opt_keys]\n if len(opts) > 0 and ((not any(opts)) or (all(opts))):\n raise ValueError(\"state_dict should contain only one of '{}' keys\".format(self._state_dict_one_of_opt_keys))\n", "path": "ignite/base/mixins.py" } ]
diff --git a/ignite/base/mixins.py b/ignite/base/mixins.py index 5cb35e1712fc..93b3ac0dd424 100644 --- a/ignite/base/mixins.py +++ b/ignite/base/mixins.py @@ -4,8 +4,8 @@ class Serializable: - _state_dict_all_req_keys = () - _state_dict_one_of_opt_keys = () + _state_dict_all_req_keys = () # type: tuple + _state_dict_one_of_opt_keys = () # type: tuple def state_dict(self) -> OrderedDict: pass diff --git a/mypy.ini b/mypy.ini index 586ae4633cf3..e0372c8029f2 100644 --- a/mypy.ini +++ b/mypy.ini @@ -3,10 +3,6 @@ files = ignite pretty = True show_error_codes = True -[mypy-ignite.base.*] - -ignore_errors = True - [mypy-ignite.contrib.*] ignore_errors = True
cocotb__cocotb-1776
coroutines that return before their first yield cause the simulator to shutdown Repro: ```python @cocotb.test() def test_func_empty(dut): """ Test that a function can complete before the first yield """ @cocotb.coroutine def func_empty(): print("This line runs") return yield # needed to make this a coroutine yield func_empty() print("This line is never reached") ```
[ { "content": "# Copyright cocotb contributors\n# Licensed under the Revised BSD License, see LICENSE for details.\n# SPDX-License-Identifier: BSD-3-Clause\nimport IPython\nfrom IPython.terminal.ipapp import load_default_config\nfrom IPython.terminal.prompts import Prompts, Token\n\nimport cocotb\n\n\nclass SimTimePrompt(Prompts):\n \"\"\" custom prompt that shows the sim time after a trigger fires \"\"\"\n _show_time = 1\n\n def in_prompt_tokens(self, cli=None):\n tokens = super().in_prompt_tokens()\n if self._show_time == self.shell.execution_count:\n tokens = [\n (Token.Comment, \"sim time: {}\".format(cocotb.utils.get_sim_time())),\n (Token.Text, \"\\n\"),\n ] + tokens\n return tokens\n\n\ndef _runner(shell, x):\n \"\"\" Handler for async functions \"\"\"\n ret = cocotb.scheduler.queue_function(x)\n shell.prompts._show_time = shell.execution_count\n return ret\n\n\nasync def embed(user_ns: dict = {}):\n \"\"\"\n Start an ipython shell in the current coroutine.\n\n Unlike using :func:`IPython.embed` directly, the :keyword:`await` keyword\n can be used directly from the shell to wait for triggers.\n The :keyword:`yield` keyword from the legacy :ref:`yield-syntax` is not supported.\n\n This coroutine will complete only when the user exits the interactive session.\n\n Args:\n user_ns:\n The variables to have made available in the shell.\n Passing ``locals()`` is often a good idea.\n ``cocotb`` will automatically be included.\n\n Notes:\n\n If your simulator does not provide an appropriate ``stdin``, you may\n find you cannot type in the resulting shell. Using simulators in batch\n or non-GUI mode may resolve this. This feature is experimental, and\n not all simulators are supported.\n \"\"\"\n # ensure cocotb is in the namespace, for convenience\n default_ns = dict(cocotb=cocotb)\n default_ns.update(user_ns)\n\n # build the config to enable `await`\n c = load_default_config()\n c.TerminalInteractiveShell.loop_runner = lambda x: _runner(shell, x)\n c.TerminalInteractiveShell.autoawait = True\n\n # create a shell with access to the dut, and cocotb pre-imported\n shell = IPython.terminal.embed.InteractiveShellEmbed(\n user_ns=default_ns,\n config=c,\n )\n\n # add our custom prompts\n shell.prompts = SimTimePrompt(shell)\n\n # start the shell in a background thread\n @cocotb.external\n def run_shell():\n shell()\n await run_shell()\n\n\[email protected]()\nasync def run_ipython(dut):\n \"\"\" A test that launches an interactive Python shell.\n\n Do not call this directly - use this as ``make MODULE=cocotb.ipython_support``.\n\n Within the shell, a global ``dut`` variable pointing to the design will be present.\n \"\"\"\n await cocotb.triggers.Timer(0) # workaround for gh-637\n await embed(user_ns=dict(dut=dut))\n", "path": "cocotb/ipython_support.py" } ]
[ { "content": "# Copyright cocotb contributors\n# Licensed under the Revised BSD License, see LICENSE for details.\n# SPDX-License-Identifier: BSD-3-Clause\nimport IPython\nfrom IPython.terminal.ipapp import load_default_config\nfrom IPython.terminal.prompts import Prompts, Token\n\nimport cocotb\n\n\nclass SimTimePrompt(Prompts):\n \"\"\" custom prompt that shows the sim time after a trigger fires \"\"\"\n _show_time = 1\n\n def in_prompt_tokens(self, cli=None):\n tokens = super().in_prompt_tokens()\n if self._show_time == self.shell.execution_count:\n tokens = [\n (Token.Comment, \"sim time: {}\".format(cocotb.utils.get_sim_time())),\n (Token.Text, \"\\n\"),\n ] + tokens\n return tokens\n\n\ndef _runner(shell, x):\n \"\"\" Handler for async functions \"\"\"\n ret = cocotb.scheduler.queue_function(x)\n shell.prompts._show_time = shell.execution_count\n return ret\n\n\nasync def embed(user_ns: dict = {}):\n \"\"\"\n Start an ipython shell in the current coroutine.\n\n Unlike using :func:`IPython.embed` directly, the :keyword:`await` keyword\n can be used directly from the shell to wait for triggers.\n The :keyword:`yield` keyword from the legacy :ref:`yield-syntax` is not supported.\n\n This coroutine will complete only when the user exits the interactive session.\n\n Args:\n user_ns:\n The variables to have made available in the shell.\n Passing ``locals()`` is often a good idea.\n ``cocotb`` will automatically be included.\n\n Notes:\n\n If your simulator does not provide an appropriate ``stdin``, you may\n find you cannot type in the resulting shell. Using simulators in batch\n or non-GUI mode may resolve this. This feature is experimental, and\n not all simulators are supported.\n \"\"\"\n # ensure cocotb is in the namespace, for convenience\n default_ns = dict(cocotb=cocotb)\n default_ns.update(user_ns)\n\n # build the config to enable `await`\n c = load_default_config()\n c.TerminalInteractiveShell.loop_runner = lambda x: _runner(shell, x)\n c.TerminalInteractiveShell.autoawait = True\n\n # create a shell with access to the dut, and cocotb pre-imported\n shell = IPython.terminal.embed.InteractiveShellEmbed(\n user_ns=default_ns,\n config=c,\n )\n\n # add our custom prompts\n shell.prompts = SimTimePrompt(shell)\n\n # start the shell in a background thread\n @cocotb.external\n def run_shell():\n shell()\n await run_shell()\n\n\[email protected]()\nasync def run_ipython(dut):\n \"\"\" A test that launches an interactive Python shell.\n\n Do not call this directly - use this as ``make MODULE=cocotb.ipython_support``.\n\n Within the shell, a global ``dut`` variable pointing to the design will be present.\n \"\"\"\n await embed(user_ns=dict(dut=dut))\n", "path": "cocotb/ipython_support.py" } ]
diff --git a/cocotb/ipython_support.py b/cocotb/ipython_support.py index 02662ebd88..45d1fc641a 100644 --- a/cocotb/ipython_support.py +++ b/cocotb/ipython_support.py @@ -85,5 +85,4 @@ async def run_ipython(dut): Within the shell, a global ``dut`` variable pointing to the design will be present. """ - await cocotb.triggers.Timer(0) # workaround for gh-637 await embed(user_ns=dict(dut=dut)) diff --git a/tests/test_cases/test_external/test_external.py b/tests/test_cases/test_external/test_external.py index 769f6d1ecd..4b47245a74 100755 --- a/tests/test_cases/test_external/test_external.py +++ b/tests/test_cases/test_external/test_external.py @@ -32,8 +32,8 @@ Also used a regression test of cocotb capabilities """ - import threading + import cocotb from cocotb.result import TestFailure from cocotb.triggers import Timer, RisingEdge, ReadOnly @@ -42,25 +42,21 @@ from cocotb.utils import get_sim_time -# Tests relating to calling convention and operation - def return_two(dut): - # dut._log.info("Sleeping") return 2 @cocotb.function -def yield_to_readwrite(dut): - yield RisingEdge(dut.clk) - dut._log.info("Returning from yield_to_readwrite") - yield RisingEdge(dut.clk) - dut._log.info("Returning from yield_to_readwrite") - yield Timer(1, "ns") +async def await_two_clock_edges(dut): + await RisingEdge(dut.clk) + await RisingEdge(dut.clk) + await Timer(1, units='ns') + dut._log.info("Returning from await_two_clock_edges") return 2 def calls_cocotb_function(dut): - return yield_to_readwrite(dut) + return await_two_clock_edges(dut) def print_sim_time(dut, base_time): @@ -75,150 +71,185 @@ def print_sim_time(dut, base_time): dut._log.info("external function has ended") [email protected] -def clock_monitor(dut): - count = 0 - while True: - yield RisingEdge(dut.clk) - yield Timer(1000) - count += 1 - - @cocotb.test() -def test_time_in_external(dut): - """Test that the simulation time does not advance if the wrapped external - routine does not itself yield""" - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - yield Timer(10, 'ns') +async def test_time_in_external(dut): + """ + Test that the simulation time does not advance if the wrapped external + routine does not call @function + """ + await Timer(10, units='ns') time = get_sim_time('ns') dut._log.info("Time at start of test = %d" % time) for i in range(100): dut._log.info("Loop call %d" % i) - yield external(print_sim_time)(dut, time) + await external(print_sim_time)(dut, time) time_now = get_sim_time('ns') - yield Timer(10, 'ns') + await Timer(10, units='ns') if time != time_now: raise TestFailure("Time has elapsed over external call") [email protected] -def wait_cycles(dut, n): - for _ in range(n): - yield RisingEdge(dut.clk) - - -def wait_cycles_wrapper(dut, n): - return wait_cycles(dut, n) - # Cadence simulators: "Unable set up RisingEdge(...) 
Trigger" with VHDL (see #1076) @cocotb.test(expect_error=cocotb.triggers.TriggerException if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"] else False) -def test_time_in_external_yield(dut): - """Test that an external function calling back into a cocotb function - takes the expected amount of time""" - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - yield Timer(10, 'ns') +async def test_time_in_function(dut): + """ + Test that an @external function calling back into a cocotb @function + takes the expected amount of time + """ + @cocotb.function + def wait_cycles(dut, n): + for _ in range(n): + yield RisingEdge(dut.clk) + + @external + def wait_cycles_wrapper(dut, n): + return wait_cycles(dut, n) + + clk_gen = cocotb.fork(Clock(dut.clk, 100, units='ns').start()) + await Timer(10, units='ns') for n in range(5): for i in range(20): - yield RisingEdge(dut.clk) - time = get_sim_time() + await RisingEdge(dut.clk) + time = get_sim_time('ns') expected_after = time + 100*n - yield external(wait_cycles_wrapper)(dut, n) - time_after = get_sim_time() + await wait_cycles_wrapper(dut, n) + time_after = get_sim_time('ns') if expected_after != time_after: raise TestFailure("Wrong time elapsed in external call") + # Cadence simulators: "Unable set up RisingEdge(...) Trigger" with VHDL (see #1076) @cocotb.test(expect_error=cocotb.triggers.TriggerException if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"] else False) -def test_ext_call_return(dut): - """Test ability to yield on an external non cocotb coroutine decorated - function""" - mon = cocotb.scheduler.queue(clock_monitor(dut)) - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - value = yield external(return_two)(dut) +async def test_external_call_return(dut): + """ + Test ability to await an external function that is not a coroutine using @external + """ + async def clock_monitor(dut): + count = 0 + while True: + await RisingEdge(dut.clk) + await Timer(1000, units='ns') + count += 1 + + mon = cocotb.fork(clock_monitor(dut)) + clk_gen = cocotb.fork(Clock(dut.clk, 100, units='ns').start()) + value = await external(return_two)(dut) assert value == 2 @cocotb.test() -def test_multiple_externals(dut): - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - - value = yield external(return_two)(dut) +async def test_consecutive_externals(dut): + """ + Test that multiple @external functions can be called in the same test + """ + value = await external(return_two)(dut) dut._log.info("First one completed") assert value == 2 - value = yield external(return_two)(dut) + value = await external(return_two)(dut) dut._log.info("Second one completed") assert value == 2 @cocotb.test() -def test_external_from_readonly(dut): - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) +async def test_external_from_readonly(dut): + """ + Test that @external functions that don't consume simulation time + can be called from ReadOnly state + """ + await ReadOnly() + dut._log.info("In readonly") + value = await external(return_two)(dut) + assert value == 2 - yield ReadOnly() + [email protected]() +async def test_function_from_readonly(dut): + """ + Test that @external functions that call @functions that await Triggers + can be called from ReadOnly state + """ + clk_gen = cocotb.fork(Clock(dut.clk, 100, units='ns').start()) + + await ReadOnly() dut._log.info("In readonly") - value = yield external(return_two)(dut) + value = await external(calls_cocotb_function)(dut) assert value == 2 + # Cadence simulators: "Unable 
set up RisingEdge(...) Trigger" with VHDL (see #1076) @cocotb.test(expect_error=cocotb.triggers.TriggerException if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"] else False) -def test_external_that_yields(dut): - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) +async def test_function_that_awaits(dut): + """ + Test that @external functions can call @function coroutines that + awaits Triggers and return values back through to + the test + """ + clk_gen = cocotb.fork(Clock(dut.clk, 100, units='ns').start()) - value = yield external(calls_cocotb_function)(dut) + value = await external(calls_cocotb_function)(dut) assert value == 2 + # Cadence simulators: "Unable set up RisingEdge(...) Trigger" with VHDL (see #1076) @cocotb.test(expect_error=cocotb.triggers.TriggerException if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"] else False) -def test_external_and_continue(dut): - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) +async def test_await_after_function(dut): + """ + Test that awaiting a Trigger works after returning + from @external functions that call @functions that consume + simulation time + """ + clk_gen = cocotb.fork(Clock(dut.clk, 100, units='ns').start()) - value = yield external(calls_cocotb_function)(dut) + value = await external(calls_cocotb_function)(dut) assert value == 2 - yield Timer(10, "ns") - yield RisingEdge(dut.clk) - + await Timer(10, units="ns") + await RisingEdge(dut.clk) [email protected] -def run_external(dut): - value = yield external(calls_cocotb_function)(dut) - return value # Cadence simulators: "Unable set up RisingEdge(...) Trigger" with VHDL (see #1076) @cocotb.test(expect_error=cocotb.triggers.TriggerException if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"] else False) -def test_external_from_fork(dut): - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) +async def test_external_from_fork(dut): + """ + Test that @external functions work when awaited from a forked + task + """ + async def run_function(dut): + value = await external(calls_cocotb_function)(dut) + return value - coro = cocotb.fork(run_external(dut)) - value = yield coro.join() - assert value == 2 + async def run_external(dut): + value = await external(return_two)(dut) + return value - dut._log.info("Back from join") + clk_gen = cocotb.fork(Clock(dut.clk, 100, units='ns').start()) + coro1 = cocotb.fork(run_function(dut)) + value = await coro1.join() + assert value == 2 + dut._log.info("Back from join 1") [email protected](expect_fail=True, skip=True) -def test_ext_exit_error(dut): - """Test that a premature exit of the sim at its request still results in - the clean close down of the sim world""" - yield external(return_two)(dut) - yield Timer(1000) + value = 0 + coro2 = cocotb.fork(run_external(dut)) + value = await coro2.join() + assert value == 2 + dut._log.info("Back from join 2") @cocotb.test() -def test_external_raised_exception(dut): - """ Test that exceptions thrown by @external functions can be caught """ - # workaround for gh-637 - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - +async def test_external_raised_exception(dut): + """ + Test that exceptions thrown by @external functions can be caught + """ @external def func(): raise ValueError() try: - yield func() + await func() except ValueError: pass else: @@ -226,17 +257,16 @@ def func(): @cocotb.test() -def test_external_returns_exception(dut): - """ Test that exceptions can be returned by @external functions """ - # 
workaround for gh-637 - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - +async def test_external_returns_exception(dut): + """ + Test that exceptions can be returned by @external functions + """ @external def func(): return ValueError() try: - result = yield func() + result = await func() except ValueError: raise TestFailure('Exception should not have been thrown') @@ -245,22 +275,20 @@ def func(): @cocotb.test() -def test_function_raised_exception(dut): - """ Test that exceptions thrown by @function coroutines can be caught """ - # workaround for gh-637 - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - +async def test_function_raised_exception(dut): + """ + Test that exceptions thrown by @function coroutines can be caught + """ @cocotb.function - def func(): + async def func(): raise ValueError() - yield @external def ext(): return func() try: - yield ext() + await ext() except ValueError: pass else: @@ -268,22 +296,21 @@ def ext(): @cocotb.test() -def test_function_returns_exception(dut): - """ Test that exceptions can be returned by @function coroutines """ - # workaround for gh-637 - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - +async def test_function_returns_exception(dut): + """ + Test that exceptions can be returned by @function coroutines + """ @cocotb.function - def func(): + def gen_func(): return ValueError() yield @external def ext(): - return func() + return gen_func() try: - result = yield ext() + result = await ext() except ValueError: raise TestFailure('Exception should not have been thrown') @@ -292,22 +319,19 @@ def ext(): @cocotb.test() -def test_function_from_weird_thread_fails(dut): +async def test_function_from_weird_thread_fails(dut): """ Test that background threads caling a @function do not hang forever """ - # workaround for gh-637 - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - func_started = False caller_resumed = False raised = False @cocotb.function - def func(): + async def func(): nonlocal func_started func_started = True - yield Timer(10) + await Timer(10, units='ns') def function_caller(): nonlocal raised @@ -329,27 +353,24 @@ def ext(): task = cocotb.fork(ext()) - yield Timer(20) + await Timer(20, units='ns') assert caller_resumed, "Caller was never resumed" assert not func_started, "Function should never have started" assert raised, "No exception was raised to warn the user" - yield task.join() + await task.join() @cocotb.test() -def test_function_called_in_parallel(dut): +async def test_function_called_in_parallel(dut): """ Test that the same `@function` can be called from two parallel background threads. """ - # workaround for gh-637 - clk_gen = cocotb.fork(Clock(dut.clk, 100).start()) - @cocotb.function - def function(x): - yield Timer(1) + async def function(x): + await Timer(1, units='ns') return x @cocotb.external @@ -358,7 +379,7 @@ def call_function(x): t1 = cocotb.fork(call_function(1)) t2 = cocotb.fork(call_function(2)) - v1 = yield t1 - v2 = yield t2 + v1 = await t1 + v2 = await t2 assert v1 == 1, v1 assert v2 == 2, v2
qtile__qtile-4669
Icons used in cloned LaunchBars do not dynamically change their size when `icon_size` isn't set

### Issue description

I cloned a LaunchBar for use on my multi-monitor setup, and drew bars for each monitor, with the main bar a different size from the secondary bars.

Resulting behavior: the icons on the secondary bar are sized appropriately, and the icons on the main bar match the size of the icons on the secondary bar.

Secondary bar:
![1706381680](https://github.com/qtile/qtile/assets/794101/7c7b2c65-344f-4493-b8a4-8fc31225c9ce)

Main bar (whose height is 2x the height of the secondary bar):
![1706381644](https://github.com/qtile/qtile/assets/794101/207e4480-1ad5-4904-be6b-c718527ae8b9)

Expected behavior: the size of the icons fits the size of their respective bars.

Steps to reproduce:

1. Create a qtile environment with at least two available screens.
2. Create a LaunchBar widget that loads an image for the icon. For example, a simple Discord launch button like `LB_test = widget.LaunchBar(progs=['discord', 'discord', ''])` should suffice.
3. Draw two bars in `screens` where one bar has a different size from the second one, and a clone of the above widget is inserted in place of the original. For example, `screens = [Screen(bottom=bar.Bar([LB_test], 56)), Screen(bottom=bar.Bar([LB_test.clone()], 28))]`

If needed, I can include a version of my configuration file where the issue is reproduced.

### Version

0.23.0

### Backend

X11 (default)

### Logs

_No response_

### Required

- [X] I have searched past issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
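A user-side workaround sketch (my assumption, not the project's fix): rather than cloning one LaunchBar instance across bars of different heights, construct a fresh instance per bar so each one computes its icon size from the bar it actually lives in.

```python
# Hedged workaround sketch, not qtile's fix: build one LaunchBar per bar so
# each instance sizes its icons against its own bar height.
from libqtile import bar, widget
from libqtile.config import Screen


def make_launchbar():
    # progs is a list of (name, command, description) tuples
    return widget.LaunchBar(progs=[("discord", "discord", "")])


screens = [
    Screen(bottom=bar.Bar([make_launchbar()], 56)),
    Screen(bottom=bar.Bar([make_launchbar()], 28)),
]
```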
[ { "content": "# Copyright (c) 2008-2010 Aldo Cortesi\n# Copyright (c) 2011 Florian Mounier\n# Copyright (c) 2011 Kenji_Takahashi\n# Copyright (c) 2011 Paul Colomiets\n# Copyright (c) 2012 roger\n# Copyright (c) 2012 Craig Barnes\n# Copyright (c) 2012-2015 Tycho Andersen\n# Copyright (c) 2013 dequis\n# Copyright (c) 2013 David R. Andersen\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 Justin Bronder\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport asyncio\nimport copy\nimport math\nimport subprocess\nfrom typing import TYPE_CHECKING\n\nfrom libqtile import bar, configurable, confreader\nfrom libqtile.command import interface\nfrom libqtile.command.base import CommandError, CommandObject, expose_command\nfrom libqtile.lazy import LazyCall\nfrom libqtile.log_utils import logger\nfrom libqtile.utils import create_task\n\nif TYPE_CHECKING:\n from typing import Any\n\n from libqtile.command.base import ItemT\n\n# Each widget class must define which bar orientation(s) it supports by setting\n# these bits in an 'orientations' class attribute. Simply having the attribute\n# inherited by superclasses is discouraged, because if a superclass that was\n# only supporting one orientation, adds support for the other, its subclasses\n# will have to be adapted too, in general. 
ORIENTATION_NONE is only added for\n# completeness' sake.\n# +------------------------+--------------------+--------------------+\n# | Widget bits | Horizontal bar | Vertical bar |\n# +========================+====================+====================+\n# | ORIENTATION_NONE | ConfigError raised | ConfigError raised |\n# +------------------------+--------------------+--------------------+\n# | ORIENTATION_HORIZONTAL | Widget displayed | ConfigError raised |\n# | | horizontally | |\n# +------------------------+--------------------+--------------------+\n# | ORIENTATION_VERTICAL | ConfigError raised | Widget displayed |\n# | | | vertically |\n# +------------------------+--------------------+--------------------+\n# | ORIENTATION_BOTH | Widget displayed | Widget displayed |\n# | | horizontally | vertically |\n# +------------------------+--------------------+--------------------+\n\n\nclass _Orientations(int):\n def __new__(cls, value, doc):\n return super().__new__(cls, value)\n\n def __init__(self, value, doc):\n self.doc = doc\n\n def __str__(self):\n return self.doc\n\n def __repr__(self):\n return self.doc\n\n\nORIENTATION_NONE = _Orientations(0, \"none\")\nORIENTATION_HORIZONTAL = _Orientations(1, \"horizontal only\")\nORIENTATION_VERTICAL = _Orientations(2, \"vertical only\")\nORIENTATION_BOTH = _Orientations(3, \"horizontal and vertical\")\n\n\nclass _Widget(CommandObject, configurable.Configurable):\n \"\"\"Base Widget class\n\n If length is set to the special value `bar.STRETCH`, the bar itself will\n set the length to the maximum remaining space, after all other widgets have\n been configured.\n\n In horizontal bars, 'length' corresponds to the width of the widget; in\n vertical bars, it corresponds to the widget's height.\n\n The offsetx and offsety attributes are set by the Bar after all widgets\n have been configured.\n\n Callback functions can be assigned to button presses by passing a dict to the\n 'callbacks' kwarg. No arguments are passed to the function so, if\n you need access to the qtile object, it needs to be imported into your code.\n\n ``lazy`` functions can also be passed as callback functions and can be used in\n the same way as keybindings.\n\n For example:\n\n .. code-block:: python\n\n from libqtile import qtile\n\n def open_calendar():\n qtile.spawn('gsimplecal next_month')\n\n clock = widget.Clock(\n mouse_callbacks={\n 'Button1': open_calendar,\n 'Button3': lazy.spawn('gsimplecal prev_month')\n }\n )\n\n When the clock widget receives a click with button 1, the ``open_calendar`` function\n will be executed.\n \"\"\"\n\n orientations = ORIENTATION_BOTH\n\n # Default (empty set) is for all backends to be supported. Widgets can override this\n # to explicitly confirm which backends are supported\n supported_backends: set[str] = set()\n\n offsetx: int = 0\n offsety: int = 0\n defaults: list[tuple[str, Any, str]] = [\n (\"background\", None, \"Widget background color\"),\n (\n \"mouse_callbacks\",\n {},\n \"Dict of mouse button press callback functions. 
Accepts functions and ``lazy`` calls.\",\n ),\n ]\n\n def __init__(self, length, **config):\n \"\"\"\n length: bar.STRETCH, bar.CALCULATED, or a specified length.\n \"\"\"\n CommandObject.__init__(self)\n self.name = self.__class__.__name__.lower()\n if \"name\" in config:\n self.name = config[\"name\"]\n\n configurable.Configurable.__init__(self, **config)\n self.add_defaults(_Widget.defaults)\n\n if length in (bar.CALCULATED, bar.STRETCH):\n self.length_type = length\n self.length = 0\n elif isinstance(length, int):\n self.length_type = bar.STATIC\n self.length = length\n else:\n raise confreader.ConfigError(\"Widget width must be an int\")\n\n self.configured = False\n self._futures: list[asyncio.Handle] = []\n self._mirrors: set[_Widget] = set()\n self.finalized = False\n\n @property\n def length(self):\n if self.length_type == bar.CALCULATED:\n return int(self.calculate_length())\n return self._length\n\n @length.setter\n def length(self, value):\n self._length = value\n\n @property\n def width(self):\n if self.bar.horizontal:\n return self.length\n return self.bar.width\n\n @property\n def height(self):\n if self.bar.horizontal:\n return self.bar.height\n return self.length\n\n @property\n def offset(self):\n if self.bar.horizontal:\n return self.offsetx\n return self.offsety\n\n def _test_orientation_compatibility(self, horizontal):\n if horizontal:\n if not self.orientations & ORIENTATION_HORIZONTAL:\n raise confreader.ConfigError(\n self.__class__.__name__\n + \" is not compatible with the orientation of the bar.\"\n )\n elif not self.orientations & ORIENTATION_VERTICAL:\n raise confreader.ConfigError(\n self.__class__.__name__ + \" is not compatible with the orientation of the bar.\"\n )\n\n def timer_setup(self):\n \"\"\"This is called exactly once, after the widget has been configured\n and timers are available to be set up.\"\"\"\n pass\n\n def _configure(self, qtile, bar):\n self._test_orientation_compatibility(bar.horizontal)\n\n self.qtile = qtile\n self.bar = bar\n self.drawer = bar.window.create_drawer(self.bar.width, self.bar.height)\n\n # Clear this flag as widget may be restarted (e.g. if screen removed and re-added)\n self.finalized = False\n\n # Timers are added to futures list so they can be cancelled if the `finalize` method is\n # called before the timers have fired.\n if not self.configured:\n timer = self.qtile.call_soon(self.timer_setup)\n async_timer = self.qtile.call_soon(asyncio.create_task, self._config_async())\n\n # Add these to our list of futures so they can be cancelled.\n self._futures.extend([timer, async_timer])\n\n async def _config_async(self):\n \"\"\"\n This is called once when the main eventloop has started. this\n happens after _configure has been run.\n\n Widgets that need to use asyncio coroutines after this point may\n wish to initialise the relevant code (e.g. connections to dbus\n using dbus_next) here.\n \"\"\"\n pass\n\n def finalize(self):\n for future in self._futures:\n future.cancel()\n if hasattr(self, \"layout\") and self.layout:\n self.layout.finalize()\n self.drawer.finalize()\n self.finalized = True\n\n # Reset configuration status so the widget can be reconfigured\n # e.g. 
when screen is re-added\n self.configured = False\n\n def clear(self):\n self.drawer.set_source_rgb(self.bar.background)\n self.drawer.fillrect(self.offsetx, self.offsety, self.width, self.height)\n\n @expose_command()\n def info(self):\n \"\"\"Info for this object.\"\"\"\n return dict(\n name=self.name,\n offset=self.offset,\n length=self.length,\n width=self.width,\n height=self.height,\n )\n\n def add_callbacks(self, defaults):\n \"\"\"Add default callbacks with a lower priority than user-specified callbacks.\"\"\"\n defaults.update(self.mouse_callbacks)\n self.mouse_callbacks = defaults\n\n def button_press(self, x, y, button):\n name = \"Button{0}\".format(button)\n if name in self.mouse_callbacks:\n cmd = self.mouse_callbacks[name]\n if isinstance(cmd, LazyCall):\n if cmd.check(self.qtile):\n status, val = self.qtile.server.call(\n (cmd.selectors, cmd.name, cmd.args, cmd.kwargs)\n )\n if status in (interface.ERROR, interface.EXCEPTION):\n logger.error(\"Mouse callback command error %s: %s\", cmd.name, val)\n else:\n cmd()\n\n def button_release(self, x, y, button):\n pass\n\n def get(self, q, name):\n \"\"\"\n Utility function for quick retrieval of a widget by name.\n \"\"\"\n w = q.widgets_map.get(name)\n if not w:\n raise CommandError(\"No such widget: %s\" % name)\n return w\n\n def _items(self, name: str) -> ItemT:\n if name == \"bar\":\n return True, []\n elif name == \"screen\":\n return True, []\n return None\n\n def _select(self, name, sel):\n if name == \"bar\":\n return self.bar\n elif name == \"screen\":\n return self.bar.screen\n\n def draw(self):\n \"\"\"\n Method that draws the widget. You may call this explicitly to\n redraw the widget, but only if the length of the widget hasn't\n changed. If it has, you must call bar.draw instead.\n \"\"\"\n raise NotImplementedError\n\n def calculate_length(self):\n \"\"\"\n Must be implemented if the widget can take CALCULATED for length.\n It must return the width of the widget if it's installed in a\n horizontal bar; it must return the height of the widget if it's\n installed in a vertical bar. 
Usually you will test the orientation\n of the bar with 'self.bar.horizontal'.\n \"\"\"\n raise NotImplementedError\n\n def timeout_add(self, seconds, method, method_args=()):\n \"\"\"\n This method calls ``.call_later`` with given arguments.\n \"\"\"\n # Don't add timers for finalised widgets\n if self.finalized:\n return\n\n future = self.qtile.call_later(seconds, self._wrapper, method, *method_args)\n\n self._futures.append(future)\n return future\n\n def call_process(self, command, **kwargs):\n \"\"\"\n This method uses `subprocess.check_output` to run the given command\n and return the string from stdout, which is decoded when using\n Python 3.\n \"\"\"\n return subprocess.check_output(command, **kwargs, encoding=\"utf-8\")\n\n def _remove_dead_timers(self):\n \"\"\"Remove completed and cancelled timers from the list.\"\"\"\n\n def is_ready(timer):\n return timer in self.qtile._eventloop._ready\n\n self._futures = [\n timer\n for timer in self._futures\n # Filter out certain handles...\n if not (\n timer.cancelled()\n # Once a scheduled timer is ready to be run its _scheduled flag is set to False\n # and it's added to the loop's `_ready` queue\n or (\n isinstance(timer, asyncio.TimerHandle)\n and not timer._scheduled\n and not is_ready(timer)\n )\n # Callbacks scheduled via `call_soon` are put into the loop's `_ready` queue\n # and are removed once they've been executed\n or (isinstance(timer, asyncio.Handle) and not is_ready(timer))\n )\n ]\n\n def _wrapper(self, method, *method_args):\n self._remove_dead_timers()\n try:\n if asyncio.iscoroutinefunction(method):\n create_task(method(*method_args))\n elif asyncio.iscoroutine(method):\n create_task(method)\n else:\n method(*method_args)\n except: # noqa: E722\n logger.exception(\"got exception from widget timer\")\n\n def create_mirror(self):\n return Mirror(self, background=self.background)\n\n def clone(self):\n return copy.copy(self)\n\n def mouse_enter(self, x, y):\n pass\n\n def mouse_leave(self, x, y):\n pass\n\n def _draw_with_mirrors(self) -> None:\n self._old_draw()\n for mirror in self._mirrors:\n if not mirror.configured:\n continue\n\n # If the widget and mirror are on the same bar then we could have an\n # infinite loop when we call bar.draw(). mirror.draw() will trigger a resize\n # if it's the wrong size.\n if mirror.length_type == bar.CALCULATED and mirror.bar is not self.bar:\n mirror.bar.draw()\n else:\n mirror.draw()\n\n def add_mirror(self, widget: _Widget):\n if not self._mirrors:\n self._old_draw = self.draw\n self.draw = self._draw_with_mirrors # type: ignore\n\n self._mirrors.add(widget)\n if not self.drawer.has_mirrors:\n self.drawer.has_mirrors = True\n\n def remove_mirror(self, widget: _Widget):\n try:\n self._mirrors.remove(widget)\n except KeyError:\n pass\n\n if not self._mirrors:\n self.drawer.has_mirrors = False\n\n if hasattr(self, \"_old_draw\"):\n # Deletes the reference to draw and falls back to the original\n del self.draw\n del self._old_draw\n\n\nUNSPECIFIED = bar.Obj(\"UNSPECIFIED\")\n\n\nclass _TextBox(_Widget):\n \"\"\"\n Base class for widgets that are just boxes containing text.\n \"\"\"\n\n orientations = ORIENTATION_BOTH\n defaults = [\n (\"font\", \"sans\", \"Default font\"),\n (\"fontsize\", None, \"Font size. Calculated if None.\"),\n (\"padding\", None, \"Padding. 
Calculated if None.\"),\n (\"foreground\", \"ffffff\", \"Foreground colour\"),\n (\"fontshadow\", None, \"font shadow color, default is None(no shadow)\"),\n (\"markup\", True, \"Whether or not to use pango markup\"),\n (\n \"fmt\",\n \"{}\",\n \"Format to apply to the string returned by the widget. Main purpose: applying markup. \"\n \"For a widget that returns ``foo``, using ``fmt='<i>{}</i>'`` would give you ``<i>foo</i>``. \"\n \"To control what the widget outputs in the first place, use the ``format`` paramater of the widget (if it has one).\",\n ),\n (\"max_chars\", 0, \"Maximum number of characters to display in widget.\"),\n (\n \"scroll\",\n False,\n \"Whether text should be scrolled. When True, you must set the widget's ``width``.\",\n ),\n (\n \"scroll_repeat\",\n True,\n \"Whether text should restart scrolling once the text has ended\",\n ),\n (\n \"scroll_delay\",\n 2,\n \"Number of seconds to pause before starting scrolling and restarting/clearing text at end\",\n ),\n (\"scroll_step\", 1, \"Number of pixels to scroll with each step\"),\n (\"scroll_interval\", 0.1, \"Time in seconds before next scrolling step\"),\n (\n \"scroll_clear\",\n False,\n \"Whether text should scroll completely away (True) or stop when the end of the text is shown (False)\",\n ),\n (\"scroll_hide\", False, \"Whether the widget should hide when scrolling has finished\"),\n (\n \"scroll_fixed_width\",\n False,\n \"When ``scroll=True`` the ``width`` parameter is a maximum width and, when text is shorter than this, the widget will resize. \"\n \"Setting ``scroll_fixed_width=True`` will force the widget to have a fixed width, regardless of the size of the text.\",\n ),\n ] # type: list[tuple[str, Any, str]]\n\n def __init__(self, text=\" \", width=bar.CALCULATED, **config):\n self.layout = None\n _Widget.__init__(self, width, **config)\n self.add_defaults(_TextBox.defaults)\n self.text = text\n self._is_scrolling = False\n self._should_scroll = False\n self._scroll_offset = 0\n self._scroll_queued = False\n self._scroll_timer = None\n self._scroll_width = width\n\n @property\n def text(self):\n return self._text\n\n @text.setter\n def text(self, value):\n if len(value) > self.max_chars > 0:\n value = value[: self.max_chars] + \"…\"\n self._text = value\n if self.layout:\n self.layout.text = self.formatted_text\n if self.scroll:\n self.check_width()\n self.reset_scroll()\n\n @property\n def formatted_text(self):\n return self.fmt.format(self._text)\n\n @property\n def foreground(self):\n return self._foreground\n\n @foreground.setter\n def foreground(self, fg):\n self._foreground = fg\n if self.layout:\n self.layout.colour = fg\n\n @property\n def font(self):\n return self._font\n\n @font.setter\n def font(self, value):\n self._font = value\n if self.layout:\n self.layout.font = value\n\n @property\n def fontshadow(self):\n return self._fontshadow\n\n @fontshadow.setter\n def fontshadow(self, value):\n self._fontshadow = value\n if self.layout:\n self.layout.font_shadow = value\n\n @property\n def actual_padding(self):\n if self.padding is None:\n return self.fontsize / 2\n else:\n return self.padding\n\n def _configure(self, qtile, bar):\n _Widget._configure(self, qtile, bar)\n if self.fontsize is None:\n self.fontsize = self.bar.height - self.bar.height / 5\n self.layout = self.drawer.textlayout(\n self.formatted_text,\n self.foreground,\n self.font,\n self.fontsize,\n self.fontshadow,\n markup=self.markup,\n )\n if not isinstance(self._scroll_width, int) and self.scroll:\n logger.warning(\"%s: You must 
specify a width when enabling scrolling.\", self.name)\n self.scroll = False\n\n if self.scroll:\n self.check_width()\n\n def check_width(self):\n \"\"\"\n Check whether the widget needs to have calculated or fixed width\n and whether the text should be scrolled.\n \"\"\"\n if self.layout.width > self._scroll_width:\n self.length_type = bar.STATIC\n self.length = self._scroll_width\n self._is_scrolling = True\n self._should_scroll = True\n else:\n if self.scroll_fixed_width:\n self.length_type = bar.STATIC\n self.length = self._scroll_width\n else:\n self.length_type = bar.CALCULATED\n self._should_scroll = False\n\n def calculate_length(self):\n if self.text:\n if self.bar.horizontal:\n return min(self.layout.width, self.bar.width) + self.actual_padding * 2\n else:\n return min(self.layout.width, self.bar.height) + self.actual_padding * 2\n else:\n return 0\n\n def can_draw(self):\n can_draw = (\n self.layout is not None and not self.layout.finalized() and self.offsetx is not None\n ) # if the bar hasn't placed us yet\n return can_draw\n\n def draw(self):\n if not self.can_draw():\n return\n self.drawer.clear(self.background or self.bar.background)\n\n # size = self.bar.height if self.bar.horizontal else self.bar.width\n self.drawer.ctx.save()\n\n if not self.bar.horizontal:\n # Left bar reads bottom to top\n if self.bar.screen.left is self.bar:\n self.drawer.ctx.rotate(-90 * math.pi / 180.0)\n self.drawer.ctx.translate(-self.length, 0)\n\n # Right bar is top to bottom\n else:\n self.drawer.ctx.translate(self.bar.width, 0)\n self.drawer.ctx.rotate(90 * math.pi / 180.0)\n\n # If we're scrolling, we clip the context to the scroll width less the padding\n # Move the text layout position (and we only see the clipped portion)\n if self._should_scroll:\n self.drawer.ctx.rectangle(\n self.actual_padding,\n 0,\n self._scroll_width - 2 * self.actual_padding,\n self.bar.size,\n )\n self.drawer.ctx.clip()\n\n size = self.bar.height if self.bar.horizontal else self.bar.width\n\n self.layout.draw(\n (self.actual_padding or 0) - self._scroll_offset,\n int(size / 2.0 - self.layout.height / 2.0) + 1,\n )\n self.drawer.ctx.restore()\n\n self.drawer.draw(\n offsetx=self.offsetx, offsety=self.offsety, width=self.width, height=self.height\n )\n\n # We only want to scroll if:\n # - User has asked us to scroll and the scroll width is smaller than the layout (should_scroll=True)\n # - We are still scrolling (is_scrolling=True)\n # - We haven't already queued the next scroll (scroll_queued=False)\n if self._should_scroll and self._is_scrolling and not self._scroll_queued:\n self._scroll_queued = True\n if self._scroll_offset == 0:\n interval = self.scroll_delay\n else:\n interval = self.scroll_interval\n self._scroll_timer = self.timeout_add(interval, self.do_scroll)\n\n def do_scroll(self):\n # Allow the next scroll tick to be queued\n self._scroll_queued = False\n\n # If we're still scrolling, adjust the next offset\n if self._is_scrolling:\n self._scroll_offset += self.scroll_step\n\n # Check whether we need to stop scrolling when:\n # - we've scrolled all the text off the widget (scroll_clear = True)\n # - the final pixel is visible (scroll_clear = False)\n if (self.scroll_clear and self._scroll_offset > self.layout.width) or (\n not self.scroll_clear\n and (self.layout.width - self._scroll_offset)\n < (self._scroll_width - 2 * self.actual_padding)\n ):\n self._is_scrolling = False\n\n # We've reached the end of the scroll so what next?\n if not self._is_scrolling:\n if self.scroll_repeat:\n # Pause and 
restart scrolling\n self._scroll_timer = self.timeout_add(self.scroll_delay, self.reset_scroll)\n elif self.scroll_hide:\n # Clear the text\n self._scroll_timer = self.timeout_add(self.scroll_delay, self.hide_scroll)\n # If neither of these options then the text is no longer updated.\n\n self.draw()\n\n def reset_scroll(self):\n self._scroll_offset = 0\n self._is_scrolling = True\n self._scroll_queued = False\n if self._scroll_timer:\n self._scroll_timer.cancel()\n self.draw()\n\n def hide_scroll(self):\n self.update(\"\")\n\n @expose_command()\n def set_font(self, font=UNSPECIFIED, fontsize=UNSPECIFIED, fontshadow=UNSPECIFIED):\n \"\"\"\n Change the font used by this widget. If font is None, the current\n font is used.\n \"\"\"\n if font is not UNSPECIFIED:\n self.font = font\n if fontsize is not UNSPECIFIED:\n self.fontsize = fontsize\n if fontshadow is not UNSPECIFIED:\n self.fontshadow = fontshadow\n self.bar.draw()\n\n @expose_command()\n def info(self):\n d = _Widget.info(self)\n d[\"foreground\"] = self.foreground\n d[\"text\"] = self.formatted_text\n return d\n\n def update(self, text):\n \"\"\"Update the widget text.\"\"\"\n # Don't try to update text in dead layouts\n # This is mainly required for ThreadPoolText based widgets as the\n # polling function cannot be cancelled and so may be called after the widget\n # is finalised.\n if not self.can_draw():\n return\n\n if self.text == text:\n return\n if text is None:\n text = \"\"\n\n old_width = self.layout.width\n self.text = text\n\n # If our width hasn't changed, we just draw ourselves. Otherwise,\n # we draw the whole bar.\n if self.layout.width == old_width:\n self.draw()\n else:\n self.bar.draw()\n\n\nclass InLoopPollText(_TextBox):\n \"\"\"A common interface for polling some 'fast' information, munging it, and\n rendering the result in a text box. You probably want to use\n ThreadPoolText instead.\n\n ('fast' here means that this runs /in/ the event loop, so don't block! If\n you want to run something nontrivial, use ThreadedPollWidget.)\"\"\"\n\n defaults = [\n (\n \"update_interval\",\n 600,\n \"Update interval in seconds, if none, the widget updates only once.\",\n ),\n ] # type: list[tuple[str, Any, str]]\n\n def __init__(self, default_text=\"N/A\", **config):\n _TextBox.__init__(self, default_text, **config)\n self.add_defaults(InLoopPollText.defaults)\n\n def timer_setup(self):\n update_interval = self.tick()\n # If self.update_interval is defined and .tick() returns None, re-call\n # after self.update_interval\n if update_interval is None and self.update_interval is not None:\n self.timeout_add(self.update_interval, self.timer_setup)\n # We can change the update interval by returning something from .tick()\n elif update_interval:\n self.timeout_add(update_interval, self.timer_setup)\n # If update_interval is False, we won't re-call\n\n def _configure(self, qtile, bar):\n should_tick = self.configured\n _TextBox._configure(self, qtile, bar)\n\n # Update when we are being re-configured.\n if should_tick:\n self.tick()\n\n def button_press(self, x, y, button):\n self.tick()\n _TextBox.button_press(self, x, y, button)\n\n def poll(self):\n return \"N/A\"\n\n def tick(self):\n text = self.poll()\n self.update(text)\n\n\nclass ThreadPoolText(_TextBox):\n \"\"\"A common interface for wrapping blocking events which when triggered\n will update a textbox.\n\n The poll method is intended to wrap a blocking function which may take\n quite a while to return anything. 
It will be executed as a future and\n should return updated text when completed. It may also return None to\n disable any further updates.\n\n param: text - Initial text to display.\n \"\"\"\n\n defaults = [\n (\n \"update_interval\",\n 600,\n \"Update interval in seconds, if none, the widget updates only once.\",\n ),\n ] # type: list[tuple[str, Any, str]]\n\n def __init__(self, text, **config):\n super().__init__(text, **config)\n self.add_defaults(ThreadPoolText.defaults)\n\n def timer_setup(self):\n def on_done(future):\n try:\n result = future.result()\n except Exception:\n result = None\n logger.exception(\"poll() raised exceptions, not rescheduling\")\n\n if result is not None:\n try:\n self.update(result)\n\n if self.update_interval is not None:\n self.timeout_add(self.update_interval, self.timer_setup)\n\n except Exception:\n logger.exception(\"Failed to reschedule timer for %s.\", self.name)\n else:\n logger.warning(\"%s's poll() returned None, not rescheduling\", self.name)\n\n self.future = self.qtile.run_in_executor(self.poll)\n self.future.add_done_callback(on_done)\n\n def poll(self):\n pass\n\n @expose_command()\n def force_update(self):\n \"\"\"Immediately poll the widget. Existing timers are unaffected.\"\"\"\n self.update(self.poll())\n\n\n# these two classes below look SUSPICIOUSLY similar\n\n\nclass PaddingMixin(configurable.Configurable):\n \"\"\"Mixin that provides padding(_x|_y|)\n\n To use it, subclass and add this to __init__:\n\n self.add_defaults(base.PaddingMixin.defaults)\n \"\"\"\n\n defaults = [\n (\"padding\", 3, \"Padding inside the box\"),\n (\"padding_x\", None, \"X Padding. Overrides 'padding' if set\"),\n (\"padding_y\", None, \"Y Padding. Overrides 'padding' if set\"),\n ] # type: list[tuple[str, Any, str]]\n\n padding_x = configurable.ExtraFallback(\"padding_x\", \"padding\")\n padding_y = configurable.ExtraFallback(\"padding_y\", \"padding\")\n\n\nclass MarginMixin(configurable.Configurable):\n \"\"\"Mixin that provides margin(_x|_y|)\n\n To use it, subclass and add this to __init__:\n\n self.add_defaults(base.MarginMixin.defaults)\n \"\"\"\n\n defaults = [\n (\"margin\", 3, \"Margin inside the box\"),\n (\"margin_x\", None, \"X Margin. Overrides 'margin' if set\"),\n (\"margin_y\", None, \"Y Margin. Overrides 'margin' if set\"),\n ] # type: list[tuple[str, Any, str]]\n\n margin_x = configurable.ExtraFallback(\"margin_x\", \"margin\")\n margin_y = configurable.ExtraFallback(\"margin_y\", \"margin\")\n\n\nclass Mirror(_Widget):\n \"\"\"\n A widget for showing the same widget content in more than one place, for\n instance, on bars across multiple screens.\n\n You don't need to use it directly; instead, just instantiate your widget\n once and hand it in to multiple bars. For instance::\n\n cpu = widget.CPUGraph()\n clock = widget.Clock()\n\n screens = [\n Screen(top=bar.Bar([widget.GroupBox(), cpu, clock])),\n Screen(top=bar.Bar([widget.GroupBox(), cpu, clock])),\n ]\n\n Widgets can be passed to more than one bar, so that there don't need to be\n any duplicates executing the same code all the time, and they'll always be\n visually identical.\n\n This works for all widgets that use `drawers` (and nothing else) to display\n their contents. 
Currently, this is all widgets except for `Systray`.\n \"\"\"\n\n def __init__(self, reflection, **config):\n _Widget.__init__(self, reflection.length, **config)\n self.reflects = reflection\n self._length = 0\n self.length_type = self.reflects.length_type\n\n def _configure(self, qtile, bar):\n _Widget._configure(self, qtile, bar)\n self.reflects.add_mirror(self)\n # We need to fill the background once before `draw` is called so, if\n # there's no reflection, the mirror matches its parent bar.\n self.drawer.clear(self.background or self.bar.background)\n\n def calculate_length(self):\n return self.reflects.calculate_length()\n\n @property\n def length(self):\n if self.length_type != bar.STRETCH:\n return self.reflects.length\n return self._length\n\n @length.setter\n def length(self, value):\n self._length = value\n\n def draw(self):\n self.drawer.clear(self.reflects.background or self.bar.background)\n self.reflects.drawer.paint_to(self.drawer)\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)\n\n def button_press(self, x, y, button):\n self.reflects.button_press(x, y, button)\n\n def mouse_enter(self, x, y):\n self.reflects.mouse_enter(x, y)\n\n def mouse_leave(self, x, y):\n self.reflects.mouse_leave(x, y)\n\n def finalize(self):\n self.reflects.remove_mirror(self)\n _Widget.finalize(self)\n", "path": "libqtile/widget/base.py" } ]
[ { "content": "# Copyright (c) 2008-2010 Aldo Cortesi\n# Copyright (c) 2011 Florian Mounier\n# Copyright (c) 2011 Kenji_Takahashi\n# Copyright (c) 2011 Paul Colomiets\n# Copyright (c) 2012 roger\n# Copyright (c) 2012 Craig Barnes\n# Copyright (c) 2012-2015 Tycho Andersen\n# Copyright (c) 2013 dequis\n# Copyright (c) 2013 David R. Andersen\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 Justin Bronder\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport asyncio\nimport copy\nimport math\nimport subprocess\nfrom typing import TYPE_CHECKING\n\nfrom libqtile import bar, configurable, confreader\nfrom libqtile.command import interface\nfrom libqtile.command.base import CommandError, CommandObject, expose_command\nfrom libqtile.lazy import LazyCall\nfrom libqtile.log_utils import logger\nfrom libqtile.utils import create_task\n\nif TYPE_CHECKING:\n from typing import Any\n\n from libqtile.command.base import ItemT\n\n# Each widget class must define which bar orientation(s) it supports by setting\n# these bits in an 'orientations' class attribute. Simply having the attribute\n# inherited by superclasses is discouraged, because if a superclass that was\n# only supporting one orientation, adds support for the other, its subclasses\n# will have to be adapted too, in general. 
ORIENTATION_NONE is only added for\n# completeness' sake.\n# +------------------------+--------------------+--------------------+\n# | Widget bits | Horizontal bar | Vertical bar |\n# +========================+====================+====================+\n# | ORIENTATION_NONE | ConfigError raised | ConfigError raised |\n# +------------------------+--------------------+--------------------+\n# | ORIENTATION_HORIZONTAL | Widget displayed | ConfigError raised |\n# | | horizontally | |\n# +------------------------+--------------------+--------------------+\n# | ORIENTATION_VERTICAL | ConfigError raised | Widget displayed |\n# | | | vertically |\n# +------------------------+--------------------+--------------------+\n# | ORIENTATION_BOTH | Widget displayed | Widget displayed |\n# | | horizontally | vertically |\n# +------------------------+--------------------+--------------------+\n\n\nclass _Orientations(int):\n def __new__(cls, value, doc):\n return super().__new__(cls, value)\n\n def __init__(self, value, doc):\n self.doc = doc\n\n def __str__(self):\n return self.doc\n\n def __repr__(self):\n return self.doc\n\n\nORIENTATION_NONE = _Orientations(0, \"none\")\nORIENTATION_HORIZONTAL = _Orientations(1, \"horizontal only\")\nORIENTATION_VERTICAL = _Orientations(2, \"vertical only\")\nORIENTATION_BOTH = _Orientations(3, \"horizontal and vertical\")\n\n\nclass _Widget(CommandObject, configurable.Configurable):\n \"\"\"Base Widget class\n\n If length is set to the special value `bar.STRETCH`, the bar itself will\n set the length to the maximum remaining space, after all other widgets have\n been configured.\n\n In horizontal bars, 'length' corresponds to the width of the widget; in\n vertical bars, it corresponds to the widget's height.\n\n The offsetx and offsety attributes are set by the Bar after all widgets\n have been configured.\n\n Callback functions can be assigned to button presses by passing a dict to the\n 'callbacks' kwarg. No arguments are passed to the function so, if\n you need access to the qtile object, it needs to be imported into your code.\n\n ``lazy`` functions can also be passed as callback functions and can be used in\n the same way as keybindings.\n\n For example:\n\n .. code-block:: python\n\n from libqtile import qtile\n\n def open_calendar():\n qtile.spawn('gsimplecal next_month')\n\n clock = widget.Clock(\n mouse_callbacks={\n 'Button1': open_calendar,\n 'Button3': lazy.spawn('gsimplecal prev_month')\n }\n )\n\n When the clock widget receives a click with button 1, the ``open_calendar`` function\n will be executed.\n \"\"\"\n\n orientations = ORIENTATION_BOTH\n\n # Default (empty set) is for all backends to be supported. Widgets can override this\n # to explicitly confirm which backends are supported\n supported_backends: set[str] = set()\n\n offsetx: int = 0\n offsety: int = 0\n defaults: list[tuple[str, Any, str]] = [\n (\"background\", None, \"Widget background color\"),\n (\n \"mouse_callbacks\",\n {},\n \"Dict of mouse button press callback functions. 
Accepts functions and ``lazy`` calls.\",\n ),\n ]\n\n def __init__(self, length, **config):\n \"\"\"\n length: bar.STRETCH, bar.CALCULATED, or a specified length.\n \"\"\"\n CommandObject.__init__(self)\n self.name = self.__class__.__name__.lower()\n if \"name\" in config:\n self.name = config[\"name\"]\n\n configurable.Configurable.__init__(self, **config)\n self.add_defaults(_Widget.defaults)\n\n if length in (bar.CALCULATED, bar.STRETCH):\n self.length_type = length\n self.length = 0\n elif isinstance(length, int):\n self.length_type = bar.STATIC\n self.length = length\n else:\n raise confreader.ConfigError(\"Widget width must be an int\")\n\n self.configured = False\n self._futures: list[asyncio.Handle] = []\n self._mirrors: set[_Widget] = set()\n self.finalized = False\n\n @property\n def length(self):\n if self.length_type == bar.CALCULATED:\n return int(self.calculate_length())\n return self._length\n\n @length.setter\n def length(self, value):\n self._length = value\n\n @property\n def width(self):\n if self.bar.horizontal:\n return self.length\n return self.bar.width\n\n @property\n def height(self):\n if self.bar.horizontal:\n return self.bar.height\n return self.length\n\n @property\n def offset(self):\n if self.bar.horizontal:\n return self.offsetx\n return self.offsety\n\n def _test_orientation_compatibility(self, horizontal):\n if horizontal:\n if not self.orientations & ORIENTATION_HORIZONTAL:\n raise confreader.ConfigError(\n self.__class__.__name__\n + \" is not compatible with the orientation of the bar.\"\n )\n elif not self.orientations & ORIENTATION_VERTICAL:\n raise confreader.ConfigError(\n self.__class__.__name__ + \" is not compatible with the orientation of the bar.\"\n )\n\n def timer_setup(self):\n \"\"\"This is called exactly once, after the widget has been configured\n and timers are available to be set up.\"\"\"\n pass\n\n def _configure(self, qtile, bar):\n self._test_orientation_compatibility(bar.horizontal)\n\n self.qtile = qtile\n self.bar = bar\n self.drawer = bar.window.create_drawer(self.bar.width, self.bar.height)\n\n # Clear this flag as widget may be restarted (e.g. if screen removed and re-added)\n self.finalized = False\n\n # Timers are added to futures list so they can be cancelled if the `finalize` method is\n # called before the timers have fired.\n if not self.configured:\n timer = self.qtile.call_soon(self.timer_setup)\n async_timer = self.qtile.call_soon(asyncio.create_task, self._config_async())\n\n # Add these to our list of futures so they can be cancelled.\n self._futures.extend([timer, async_timer])\n\n async def _config_async(self):\n \"\"\"\n This is called once when the main eventloop has started. this\n happens after _configure has been run.\n\n Widgets that need to use asyncio coroutines after this point may\n wish to initialise the relevant code (e.g. connections to dbus\n using dbus_next) here.\n \"\"\"\n pass\n\n def finalize(self):\n for future in self._futures:\n future.cancel()\n if hasattr(self, \"layout\") and self.layout:\n self.layout.finalize()\n self.drawer.finalize()\n self.finalized = True\n\n # Reset configuration status so the widget can be reconfigured\n # e.g. 
when screen is re-added\n self.configured = False\n\n def clear(self):\n self.drawer.set_source_rgb(self.bar.background)\n self.drawer.fillrect(self.offsetx, self.offsety, self.width, self.height)\n\n @expose_command()\n def info(self):\n \"\"\"Info for this object.\"\"\"\n return dict(\n name=self.name,\n offset=self.offset,\n length=self.length,\n width=self.width,\n height=self.height,\n )\n\n def add_callbacks(self, defaults):\n \"\"\"Add default callbacks with a lower priority than user-specified callbacks.\"\"\"\n defaults.update(self.mouse_callbacks)\n self.mouse_callbacks = defaults\n\n def button_press(self, x, y, button):\n name = \"Button{0}\".format(button)\n if name in self.mouse_callbacks:\n cmd = self.mouse_callbacks[name]\n if isinstance(cmd, LazyCall):\n if cmd.check(self.qtile):\n status, val = self.qtile.server.call(\n (cmd.selectors, cmd.name, cmd.args, cmd.kwargs)\n )\n if status in (interface.ERROR, interface.EXCEPTION):\n logger.error(\"Mouse callback command error %s: %s\", cmd.name, val)\n else:\n cmd()\n\n def button_release(self, x, y, button):\n pass\n\n def get(self, q, name):\n \"\"\"\n Utility function for quick retrieval of a widget by name.\n \"\"\"\n w = q.widgets_map.get(name)\n if not w:\n raise CommandError(\"No such widget: %s\" % name)\n return w\n\n def _items(self, name: str) -> ItemT:\n if name == \"bar\":\n return True, []\n elif name == \"screen\":\n return True, []\n return None\n\n def _select(self, name, sel):\n if name == \"bar\":\n return self.bar\n elif name == \"screen\":\n return self.bar.screen\n\n def draw(self):\n \"\"\"\n Method that draws the widget. You may call this explicitly to\n redraw the widget, but only if the length of the widget hasn't\n changed. If it has, you must call bar.draw instead.\n \"\"\"\n raise NotImplementedError\n\n def calculate_length(self):\n \"\"\"\n Must be implemented if the widget can take CALCULATED for length.\n It must return the width of the widget if it's installed in a\n horizontal bar; it must return the height of the widget if it's\n installed in a vertical bar. 
Usually you will test the orientation\n of the bar with 'self.bar.horizontal'.\n \"\"\"\n raise NotImplementedError\n\n def timeout_add(self, seconds, method, method_args=()):\n \"\"\"\n This method calls ``.call_later`` with given arguments.\n \"\"\"\n # Don't add timers for finalised widgets\n if self.finalized:\n return\n\n future = self.qtile.call_later(seconds, self._wrapper, method, *method_args)\n\n self._futures.append(future)\n return future\n\n def call_process(self, command, **kwargs):\n \"\"\"\n This method uses `subprocess.check_output` to run the given command\n and return the string from stdout, which is decoded when using\n Python 3.\n \"\"\"\n return subprocess.check_output(command, **kwargs, encoding=\"utf-8\")\n\n def _remove_dead_timers(self):\n \"\"\"Remove completed and cancelled timers from the list.\"\"\"\n\n def is_ready(timer):\n return timer in self.qtile._eventloop._ready\n\n self._futures = [\n timer\n for timer in self._futures\n # Filter out certain handles...\n if not (\n timer.cancelled()\n # Once a scheduled timer is ready to be run its _scheduled flag is set to False\n # and it's added to the loop's `_ready` queue\n or (\n isinstance(timer, asyncio.TimerHandle)\n and not timer._scheduled\n and not is_ready(timer)\n )\n # Callbacks scheduled via `call_soon` are put into the loop's `_ready` queue\n # and are removed once they've been executed\n or (isinstance(timer, asyncio.Handle) and not is_ready(timer))\n )\n ]\n\n def _wrapper(self, method, *method_args):\n self._remove_dead_timers()\n try:\n if asyncio.iscoroutinefunction(method):\n create_task(method(*method_args))\n elif asyncio.iscoroutine(method):\n create_task(method)\n else:\n method(*method_args)\n except: # noqa: E722\n logger.exception(\"got exception from widget timer\")\n\n def create_mirror(self):\n return Mirror(self, background=self.background)\n\n def clone(self):\n return copy.deepcopy(self)\n\n def mouse_enter(self, x, y):\n pass\n\n def mouse_leave(self, x, y):\n pass\n\n def _draw_with_mirrors(self) -> None:\n self._old_draw()\n for mirror in self._mirrors:\n if not mirror.configured:\n continue\n\n # If the widget and mirror are on the same bar then we could have an\n # infinite loop when we call bar.draw(). mirror.draw() will trigger a resize\n # if it's the wrong size.\n if mirror.length_type == bar.CALCULATED and mirror.bar is not self.bar:\n mirror.bar.draw()\n else:\n mirror.draw()\n\n def add_mirror(self, widget: _Widget):\n if not self._mirrors:\n self._old_draw = self.draw\n self.draw = self._draw_with_mirrors # type: ignore\n\n self._mirrors.add(widget)\n if not self.drawer.has_mirrors:\n self.drawer.has_mirrors = True\n\n def remove_mirror(self, widget: _Widget):\n try:\n self._mirrors.remove(widget)\n except KeyError:\n pass\n\n if not self._mirrors:\n self.drawer.has_mirrors = False\n\n if hasattr(self, \"_old_draw\"):\n # Deletes the reference to draw and falls back to the original\n del self.draw\n del self._old_draw\n\n\nUNSPECIFIED = bar.Obj(\"UNSPECIFIED\")\n\n\nclass _TextBox(_Widget):\n \"\"\"\n Base class for widgets that are just boxes containing text.\n \"\"\"\n\n orientations = ORIENTATION_BOTH\n defaults = [\n (\"font\", \"sans\", \"Default font\"),\n (\"fontsize\", None, \"Font size. Calculated if None.\"),\n (\"padding\", None, \"Padding. 
Calculated if None.\"),\n (\"foreground\", \"ffffff\", \"Foreground colour\"),\n (\"fontshadow\", None, \"font shadow color, default is None(no shadow)\"),\n (\"markup\", True, \"Whether or not to use pango markup\"),\n (\n \"fmt\",\n \"{}\",\n \"Format to apply to the string returned by the widget. Main purpose: applying markup. \"\n \"For a widget that returns ``foo``, using ``fmt='<i>{}</i>'`` would give you ``<i>foo</i>``. \"\n \"To control what the widget outputs in the first place, use the ``format`` paramater of the widget (if it has one).\",\n ),\n (\"max_chars\", 0, \"Maximum number of characters to display in widget.\"),\n (\n \"scroll\",\n False,\n \"Whether text should be scrolled. When True, you must set the widget's ``width``.\",\n ),\n (\n \"scroll_repeat\",\n True,\n \"Whether text should restart scrolling once the text has ended\",\n ),\n (\n \"scroll_delay\",\n 2,\n \"Number of seconds to pause before starting scrolling and restarting/clearing text at end\",\n ),\n (\"scroll_step\", 1, \"Number of pixels to scroll with each step\"),\n (\"scroll_interval\", 0.1, \"Time in seconds before next scrolling step\"),\n (\n \"scroll_clear\",\n False,\n \"Whether text should scroll completely away (True) or stop when the end of the text is shown (False)\",\n ),\n (\"scroll_hide\", False, \"Whether the widget should hide when scrolling has finished\"),\n (\n \"scroll_fixed_width\",\n False,\n \"When ``scroll=True`` the ``width`` parameter is a maximum width and, when text is shorter than this, the widget will resize. \"\n \"Setting ``scroll_fixed_width=True`` will force the widget to have a fixed width, regardless of the size of the text.\",\n ),\n ] # type: list[tuple[str, Any, str]]\n\n def __init__(self, text=\" \", width=bar.CALCULATED, **config):\n self.layout = None\n _Widget.__init__(self, width, **config)\n self.add_defaults(_TextBox.defaults)\n self.text = text\n self._is_scrolling = False\n self._should_scroll = False\n self._scroll_offset = 0\n self._scroll_queued = False\n self._scroll_timer = None\n self._scroll_width = width\n\n @property\n def text(self):\n return self._text\n\n @text.setter\n def text(self, value):\n if len(value) > self.max_chars > 0:\n value = value[: self.max_chars] + \"…\"\n self._text = value\n if self.layout:\n self.layout.text = self.formatted_text\n if self.scroll:\n self.check_width()\n self.reset_scroll()\n\n @property\n def formatted_text(self):\n return self.fmt.format(self._text)\n\n @property\n def foreground(self):\n return self._foreground\n\n @foreground.setter\n def foreground(self, fg):\n self._foreground = fg\n if self.layout:\n self.layout.colour = fg\n\n @property\n def font(self):\n return self._font\n\n @font.setter\n def font(self, value):\n self._font = value\n if self.layout:\n self.layout.font = value\n\n @property\n def fontshadow(self):\n return self._fontshadow\n\n @fontshadow.setter\n def fontshadow(self, value):\n self._fontshadow = value\n if self.layout:\n self.layout.font_shadow = value\n\n @property\n def actual_padding(self):\n if self.padding is None:\n return self.fontsize / 2\n else:\n return self.padding\n\n def _configure(self, qtile, bar):\n _Widget._configure(self, qtile, bar)\n if self.fontsize is None:\n self.fontsize = self.bar.height - self.bar.height / 5\n self.layout = self.drawer.textlayout(\n self.formatted_text,\n self.foreground,\n self.font,\n self.fontsize,\n self.fontshadow,\n markup=self.markup,\n )\n if not isinstance(self._scroll_width, int) and self.scroll:\n logger.warning(\"%s: You must 
specify a width when enabling scrolling.\", self.name)\n self.scroll = False\n\n if self.scroll:\n self.check_width()\n\n def check_width(self):\n \"\"\"\n Check whether the widget needs to have calculated or fixed width\n and whether the text should be scrolled.\n \"\"\"\n if self.layout.width > self._scroll_width:\n self.length_type = bar.STATIC\n self.length = self._scroll_width\n self._is_scrolling = True\n self._should_scroll = True\n else:\n if self.scroll_fixed_width:\n self.length_type = bar.STATIC\n self.length = self._scroll_width\n else:\n self.length_type = bar.CALCULATED\n self._should_scroll = False\n\n def calculate_length(self):\n if self.text:\n if self.bar.horizontal:\n return min(self.layout.width, self.bar.width) + self.actual_padding * 2\n else:\n return min(self.layout.width, self.bar.height) + self.actual_padding * 2\n else:\n return 0\n\n def can_draw(self):\n can_draw = (\n self.layout is not None and not self.layout.finalized() and self.offsetx is not None\n ) # if the bar hasn't placed us yet\n return can_draw\n\n def draw(self):\n if not self.can_draw():\n return\n self.drawer.clear(self.background or self.bar.background)\n\n # size = self.bar.height if self.bar.horizontal else self.bar.width\n self.drawer.ctx.save()\n\n if not self.bar.horizontal:\n # Left bar reads bottom to top\n if self.bar.screen.left is self.bar:\n self.drawer.ctx.rotate(-90 * math.pi / 180.0)\n self.drawer.ctx.translate(-self.length, 0)\n\n # Right bar is top to bottom\n else:\n self.drawer.ctx.translate(self.bar.width, 0)\n self.drawer.ctx.rotate(90 * math.pi / 180.0)\n\n # If we're scrolling, we clip the context to the scroll width less the padding\n # Move the text layout position (and we only see the clipped portion)\n if self._should_scroll:\n self.drawer.ctx.rectangle(\n self.actual_padding,\n 0,\n self._scroll_width - 2 * self.actual_padding,\n self.bar.size,\n )\n self.drawer.ctx.clip()\n\n size = self.bar.height if self.bar.horizontal else self.bar.width\n\n self.layout.draw(\n (self.actual_padding or 0) - self._scroll_offset,\n int(size / 2.0 - self.layout.height / 2.0) + 1,\n )\n self.drawer.ctx.restore()\n\n self.drawer.draw(\n offsetx=self.offsetx, offsety=self.offsety, width=self.width, height=self.height\n )\n\n # We only want to scroll if:\n # - User has asked us to scroll and the scroll width is smaller than the layout (should_scroll=True)\n # - We are still scrolling (is_scrolling=True)\n # - We haven't already queued the next scroll (scroll_queued=False)\n if self._should_scroll and self._is_scrolling and not self._scroll_queued:\n self._scroll_queued = True\n if self._scroll_offset == 0:\n interval = self.scroll_delay\n else:\n interval = self.scroll_interval\n self._scroll_timer = self.timeout_add(interval, self.do_scroll)\n\n def do_scroll(self):\n # Allow the next scroll tick to be queued\n self._scroll_queued = False\n\n # If we're still scrolling, adjust the next offset\n if self._is_scrolling:\n self._scroll_offset += self.scroll_step\n\n # Check whether we need to stop scrolling when:\n # - we've scrolled all the text off the widget (scroll_clear = True)\n # - the final pixel is visible (scroll_clear = False)\n if (self.scroll_clear and self._scroll_offset > self.layout.width) or (\n not self.scroll_clear\n and (self.layout.width - self._scroll_offset)\n < (self._scroll_width - 2 * self.actual_padding)\n ):\n self._is_scrolling = False\n\n # We've reached the end of the scroll so what next?\n if not self._is_scrolling:\n if self.scroll_repeat:\n # Pause and 
restart scrolling\n self._scroll_timer = self.timeout_add(self.scroll_delay, self.reset_scroll)\n elif self.scroll_hide:\n # Clear the text\n self._scroll_timer = self.timeout_add(self.scroll_delay, self.hide_scroll)\n # If neither of these options then the text is no longer updated.\n\n self.draw()\n\n def reset_scroll(self):\n self._scroll_offset = 0\n self._is_scrolling = True\n self._scroll_queued = False\n if self._scroll_timer:\n self._scroll_timer.cancel()\n self.draw()\n\n def hide_scroll(self):\n self.update(\"\")\n\n @expose_command()\n def set_font(self, font=UNSPECIFIED, fontsize=UNSPECIFIED, fontshadow=UNSPECIFIED):\n \"\"\"\n Change the font used by this widget. If font is None, the current\n font is used.\n \"\"\"\n if font is not UNSPECIFIED:\n self.font = font\n if fontsize is not UNSPECIFIED:\n self.fontsize = fontsize\n if fontshadow is not UNSPECIFIED:\n self.fontshadow = fontshadow\n self.bar.draw()\n\n @expose_command()\n def info(self):\n d = _Widget.info(self)\n d[\"foreground\"] = self.foreground\n d[\"text\"] = self.formatted_text\n return d\n\n def update(self, text):\n \"\"\"Update the widget text.\"\"\"\n # Don't try to update text in dead layouts\n # This is mainly required for ThreadPoolText based widgets as the\n # polling function cannot be cancelled and so may be called after the widget\n # is finalised.\n if not self.can_draw():\n return\n\n if self.text == text:\n return\n if text is None:\n text = \"\"\n\n old_width = self.layout.width\n self.text = text\n\n # If our width hasn't changed, we just draw ourselves. Otherwise,\n # we draw the whole bar.\n if self.layout.width == old_width:\n self.draw()\n else:\n self.bar.draw()\n\n\nclass InLoopPollText(_TextBox):\n \"\"\"A common interface for polling some 'fast' information, munging it, and\n rendering the result in a text box. You probably want to use\n ThreadPoolText instead.\n\n ('fast' here means that this runs /in/ the event loop, so don't block! If\n you want to run something nontrivial, use ThreadedPollWidget.)\"\"\"\n\n defaults = [\n (\n \"update_interval\",\n 600,\n \"Update interval in seconds, if none, the widget updates only once.\",\n ),\n ] # type: list[tuple[str, Any, str]]\n\n def __init__(self, default_text=\"N/A\", **config):\n _TextBox.__init__(self, default_text, **config)\n self.add_defaults(InLoopPollText.defaults)\n\n def timer_setup(self):\n update_interval = self.tick()\n # If self.update_interval is defined and .tick() returns None, re-call\n # after self.update_interval\n if update_interval is None and self.update_interval is not None:\n self.timeout_add(self.update_interval, self.timer_setup)\n # We can change the update interval by returning something from .tick()\n elif update_interval:\n self.timeout_add(update_interval, self.timer_setup)\n # If update_interval is False, we won't re-call\n\n def _configure(self, qtile, bar):\n should_tick = self.configured\n _TextBox._configure(self, qtile, bar)\n\n # Update when we are being re-configured.\n if should_tick:\n self.tick()\n\n def button_press(self, x, y, button):\n self.tick()\n _TextBox.button_press(self, x, y, button)\n\n def poll(self):\n return \"N/A\"\n\n def tick(self):\n text = self.poll()\n self.update(text)\n\n\nclass ThreadPoolText(_TextBox):\n \"\"\"A common interface for wrapping blocking events which when triggered\n will update a textbox.\n\n The poll method is intended to wrap a blocking function which may take\n quite a while to return anything. 
It will be executed as a future and\n should return updated text when completed. It may also return None to\n disable any further updates.\n\n param: text - Initial text to display.\n \"\"\"\n\n defaults = [\n (\n \"update_interval\",\n 600,\n \"Update interval in seconds, if none, the widget updates only once.\",\n ),\n ] # type: list[tuple[str, Any, str]]\n\n def __init__(self, text, **config):\n super().__init__(text, **config)\n self.add_defaults(ThreadPoolText.defaults)\n\n def timer_setup(self):\n def on_done(future):\n try:\n result = future.result()\n except Exception:\n result = None\n logger.exception(\"poll() raised exceptions, not rescheduling\")\n\n if result is not None:\n try:\n self.update(result)\n\n if self.update_interval is not None:\n self.timeout_add(self.update_interval, self.timer_setup)\n\n except Exception:\n logger.exception(\"Failed to reschedule timer for %s.\", self.name)\n else:\n logger.warning(\"%s's poll() returned None, not rescheduling\", self.name)\n\n self.future = self.qtile.run_in_executor(self.poll)\n self.future.add_done_callback(on_done)\n\n def poll(self):\n pass\n\n @expose_command()\n def force_update(self):\n \"\"\"Immediately poll the widget. Existing timers are unaffected.\"\"\"\n self.update(self.poll())\n\n\n# these two classes below look SUSPICIOUSLY similar\n\n\nclass PaddingMixin(configurable.Configurable):\n \"\"\"Mixin that provides padding(_x|_y|)\n\n To use it, subclass and add this to __init__:\n\n self.add_defaults(base.PaddingMixin.defaults)\n \"\"\"\n\n defaults = [\n (\"padding\", 3, \"Padding inside the box\"),\n (\"padding_x\", None, \"X Padding. Overrides 'padding' if set\"),\n (\"padding_y\", None, \"Y Padding. Overrides 'padding' if set\"),\n ] # type: list[tuple[str, Any, str]]\n\n padding_x = configurable.ExtraFallback(\"padding_x\", \"padding\")\n padding_y = configurable.ExtraFallback(\"padding_y\", \"padding\")\n\n\nclass MarginMixin(configurable.Configurable):\n \"\"\"Mixin that provides margin(_x|_y|)\n\n To use it, subclass and add this to __init__:\n\n self.add_defaults(base.MarginMixin.defaults)\n \"\"\"\n\n defaults = [\n (\"margin\", 3, \"Margin inside the box\"),\n (\"margin_x\", None, \"X Margin. Overrides 'margin' if set\"),\n (\"margin_y\", None, \"Y Margin. Overrides 'margin' if set\"),\n ] # type: list[tuple[str, Any, str]]\n\n margin_x = configurable.ExtraFallback(\"margin_x\", \"margin\")\n margin_y = configurable.ExtraFallback(\"margin_y\", \"margin\")\n\n\nclass Mirror(_Widget):\n \"\"\"\n A widget for showing the same widget content in more than one place, for\n instance, on bars across multiple screens.\n\n You don't need to use it directly; instead, just instantiate your widget\n once and hand it in to multiple bars. For instance::\n\n cpu = widget.CPUGraph()\n clock = widget.Clock()\n\n screens = [\n Screen(top=bar.Bar([widget.GroupBox(), cpu, clock])),\n Screen(top=bar.Bar([widget.GroupBox(), cpu, clock])),\n ]\n\n Widgets can be passed to more than one bar, so that there don't need to be\n any duplicates executing the same code all the time, and they'll always be\n visually identical.\n\n This works for all widgets that use `drawers` (and nothing else) to display\n their contents. 
Currently, this is all widgets except for `Systray`.\n \"\"\"\n\n def __init__(self, reflection, **config):\n _Widget.__init__(self, reflection.length, **config)\n self.reflects = reflection\n self._length = 0\n self.length_type = self.reflects.length_type\n\n def _configure(self, qtile, bar):\n _Widget._configure(self, qtile, bar)\n self.reflects.add_mirror(self)\n # We need to fill the background once before `draw` is called so, if\n # there's no reflection, the mirror matches its parent bar.\n self.drawer.clear(self.background or self.bar.background)\n\n def calculate_length(self):\n return self.reflects.calculate_length()\n\n @property\n def length(self):\n if self.length_type != bar.STRETCH:\n return self.reflects.length\n return self._length\n\n @length.setter\n def length(self, value):\n self._length = value\n\n def draw(self):\n self.drawer.clear(self.reflects.background or self.bar.background)\n self.reflects.drawer.paint_to(self.drawer)\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)\n\n def button_press(self, x, y, button):\n self.reflects.button_press(x, y, button)\n\n def mouse_enter(self, x, y):\n self.reflects.mouse_enter(x, y)\n\n def mouse_leave(self, x, y):\n self.reflects.mouse_leave(x, y)\n\n def finalize(self):\n self.reflects.remove_mirror(self)\n _Widget.finalize(self)\n", "path": "libqtile/widget/base.py" } ]
diff --git a/libqtile/widget/base.py b/libqtile/widget/base.py index 558840c9ca..44587229f0 100644 --- a/libqtile/widget/base.py +++ b/libqtile/widget/base.py @@ -400,7 +400,7 @@ def create_mirror(self): return Mirror(self, background=self.background) def clone(self): - return copy.copy(self) + return copy.deepcopy(self) def mouse_enter(self, x, y): pass
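For context on the one-line diff above, here is a minimal, self-contained sketch (the `Widget` class below is a simplified stand-in, not the real libqtile class) of why `copy.deepcopy` matters for `clone()`: with a shallow copy, mutable per-instance attributes such as the `mouse_callbacks` dict or the `_futures` list would be shared between the original widget and its clone.

```python
import copy


class Widget:
    """Simplified stand-in for a bar widget with per-instance mutable state."""

    def __init__(self):
        self.mouse_callbacks = {}  # would be shared by original and clone under copy.copy
        self._futures = []

    def clone(self):
        # Deep copy so the clone gets its own dict/list objects.
        return copy.deepcopy(self)


original = Widget()
shallow = copy.copy(original)            # shallow: same mouse_callbacks object
shallow.mouse_callbacks["Button1"] = "callback"
print("Button1" in original.mouse_callbacks)   # True -- state leaks back to the original

original = Widget()
deep = original.clone()                  # deep: independent mouse_callbacks
deep.mouse_callbacks["Button1"] = "callback"
print("Button1" in original.mouse_callbacks)   # False
```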
dbt-labs__dbt-core-5991
[CT-1285] [Feature] extend `-f` flag shorthand to other commands
### Is this your first time submitting a feature request?

- [X] I have read the [expectations for open source contributors](https://docs.getdbt.com/docs/contributing/oss-expectations)
- [X] I have searched the existing issues, and I could not find an existing issue for this feature
- [X] I am requesting a straightforward extension of existing dbt functionality, rather than a Big Idea better suited to a discussion

### Describe the feature

PR #5908 added the `-f` shorthand for `--full-refresh`, but I accidentally only allowed it for the `dbt build` command. This should be extended to `dbt seed` and `dbt run` and to any other command that takes `--full-refresh`.

### Describe alternatives you've considered

NA

### Who will this benefit?

typers

### Are you interested in contributing this feature?

yes

### Anything else?

_No response_
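A minimal sketch of the pattern the issue asks for, using plain argparse (the helper name `add_full_refresh_argument` and the toy subcommands are illustrative assumptions, not dbt's actual code; in `core/dbt/main.py` the analogous shared helper is `_add_table_mutability_arguments`): registering `--full-refresh` with the `-f` shorthand through one helper applied to every relevant subparser avoids the per-command definitions that left some commands without the shorthand.

```python
import argparse


def add_full_refresh_argument(*subparsers):
    # Hypothetical helper: register --full-refresh with the -f shorthand on every
    # subcommand that supports a full refresh, instead of defining the flag
    # separately per command.
    for sub in subparsers:
        sub.add_argument(
            "-f",
            "--full-refresh",
            action="store_true",
            help="Drop incremental models / seed tables and rebuild them.",
        )


parser = argparse.ArgumentParser(prog="dbt")
subs = parser.add_subparsers(dest="which")
run_sub = subs.add_parser("run")
seed_sub = subs.add_parser("seed")
build_sub = subs.add_parser("build")
add_full_refresh_argument(run_sub, seed_sub, build_sub)

# Both spellings now work for every subcommand:
args = parser.parse_args(["seed", "-f"])
print(args.which, args.full_refresh)    # seed True
args = parser.parse_args(["run", "--full-refresh"])
print(args.which, args.full_refresh)    # run True
```

The file listings below show the command-line setup this change targets.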
[ { "content": "from typing import List\n\nfrom dbt.logger import log_cache_events, log_manager\n\nimport argparse\nimport os.path\nimport sys\nimport traceback\nimport warnings\nfrom contextlib import contextmanager\nfrom pathlib import Path\n\nimport dbt.version\nfrom dbt.events.functions import fire_event, setup_event_logger\nfrom dbt.events.types import (\n MainEncounteredError,\n MainKeyboardInterrupt,\n MainReportVersion,\n MainReportArgs,\n MainTrackingUserState,\n MainStackTrace,\n)\nimport dbt.flags as flags\nimport dbt.task.build as build_task\nimport dbt.task.clean as clean_task\nimport dbt.task.compile as compile_task\nimport dbt.task.debug as debug_task\nimport dbt.task.deps as deps_task\nimport dbt.task.freshness as freshness_task\nimport dbt.task.generate as generate_task\nimport dbt.task.init as init_task\nimport dbt.task.list as list_task\nimport dbt.task.parse as parse_task\nimport dbt.task.run as run_task\nimport dbt.task.run_operation as run_operation_task\nimport dbt.task.seed as seed_task\nimport dbt.task.serve as serve_task\nimport dbt.task.snapshot as snapshot_task\nimport dbt.task.test as test_task\nfrom dbt.profiler import profiler\nfrom dbt.adapters.factory import reset_adapters, cleanup_connections\n\nimport dbt.tracking\n\nfrom dbt.utils import ExitCodes, args_to_dict\nfrom dbt.config.profile import read_user_config\nfrom dbt.exceptions import (\n Exception as dbtException,\n InternalException,\n NotImplementedException,\n FailedToConnectException,\n)\n\n\nclass DBTVersion(argparse.Action):\n \"\"\"This is very similar to the built-in argparse._Version action,\n except it just calls dbt.version.get_version_information().\n \"\"\"\n\n def __init__(\n self,\n option_strings,\n version=None,\n dest=argparse.SUPPRESS,\n default=argparse.SUPPRESS,\n help=\"show program's version number and exit\",\n ):\n super().__init__(\n option_strings=option_strings, dest=dest, default=default, nargs=0, help=help\n )\n\n def __call__(self, parser, namespace, values, option_string=None):\n formatter = argparse.RawTextHelpFormatter(prog=parser.prog)\n formatter.add_text(dbt.version.get_version_information())\n parser.exit(message=formatter.format_help())\n\n\nclass DBTArgumentParser(argparse.ArgumentParser):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.register(\"action\", \"dbtversion\", DBTVersion)\n\n def add_optional_argument_inverse(\n self,\n name,\n *,\n enable_help=None,\n disable_help=None,\n dest=None,\n no_name=None,\n default=None,\n ):\n mutex_group = self.add_mutually_exclusive_group()\n if not name.startswith(\"--\"):\n raise InternalException(\n 'cannot handle optional argument without \"--\" prefix: ' f'got \"{name}\"'\n )\n if dest is None:\n dest_name = name[2:].replace(\"-\", \"_\")\n else:\n dest_name = dest\n\n if no_name is None:\n no_name = f\"--no-{name[2:]}\"\n\n mutex_group.add_argument(\n name,\n action=\"store_const\",\n const=True,\n dest=dest_name,\n default=default,\n help=enable_help,\n )\n\n mutex_group.add_argument(\n f\"--no-{name[2:]}\",\n action=\"store_const\",\n const=False,\n dest=dest_name,\n default=default,\n help=disable_help,\n )\n\n return mutex_group\n\n\ndef main(args=None):\n # Logbook warnings are ignored so we don't have to fork logbook to support python 3.10.\n # This _only_ works for regular cli invocations.\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning, module=\"logbook\")\n if args is None:\n args = sys.argv[1:]\n with log_manager.applicationbound():\n try:\n results, 
succeeded = handle_and_check(args)\n if succeeded:\n exit_code = ExitCodes.Success.value\n else:\n exit_code = ExitCodes.ModelError.value\n\n except KeyboardInterrupt:\n # if the logger isn't configured yet, it will use the default logger\n fire_event(MainKeyboardInterrupt())\n exit_code = ExitCodes.UnhandledError.value\n\n # This can be thrown by eg. argparse\n except SystemExit as e:\n exit_code = e.code\n\n except BaseException as e:\n fire_event(MainEncounteredError(exc=str(e)))\n if not isinstance(e, dbtException):\n fire_event(MainStackTrace(stack_trace=traceback.format_exc()))\n exit_code = ExitCodes.UnhandledError.value\n\n sys.exit(exit_code)\n\n\n# here for backwards compatibility\ndef handle(args):\n res, success = handle_and_check(args)\n return res\n\n\n@contextmanager\ndef adapter_management():\n reset_adapters()\n try:\n yield\n finally:\n cleanup_connections()\n\n\ndef handle_and_check(args):\n with log_manager.applicationbound():\n parsed = parse_args(args)\n\n # Set flags from args, user config, and env vars\n user_config = read_user_config(flags.PROFILES_DIR) # This is read again later\n flags.set_from_args(parsed, user_config)\n dbt.tracking.initialize_from_flags()\n # Set log_format from flags\n parsed.cls.set_log_format()\n\n # we've parsed the args and set the flags - we can now decide if we're debug or not\n if flags.DEBUG:\n log_manager.set_debug()\n\n profiler_enabled = False\n\n if parsed.record_timing_info:\n profiler_enabled = True\n\n with profiler(enable=profiler_enabled, outfile=parsed.record_timing_info):\n\n with adapter_management():\n\n task, res = run_from_args(parsed)\n success = task.interpret_results(res)\n\n return res, success\n\n\n@contextmanager\ndef track_run(task):\n dbt.tracking.track_invocation_start(config=task.config, args=task.args)\n try:\n yield\n dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type=\"ok\")\n except (NotImplementedException, FailedToConnectException) as e:\n fire_event(MainEncounteredError(exc=str(e)))\n dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type=\"error\")\n except Exception:\n dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type=\"error\")\n raise\n finally:\n dbt.tracking.flush()\n\n\ndef run_from_args(parsed):\n log_cache_events(getattr(parsed, \"log_cache_events\", False))\n\n # this will convert DbtConfigErrors into RuntimeExceptions\n # task could be any one of the task objects\n task = parsed.cls.from_args(args=parsed)\n\n # Set up logging\n log_path = None\n if task.config is not None:\n log_path = getattr(task.config, \"log_path\", None)\n log_manager.set_path(log_path)\n # if 'list' task: set stdout to WARN instead of INFO\n level_override = parsed.cls.pre_init_hook(parsed)\n setup_event_logger(log_path or \"logs\", level_override)\n\n fire_event(MainReportVersion(v=str(dbt.version.installed)))\n fire_event(MainReportArgs(args=args_to_dict(parsed)))\n\n if dbt.tracking.active_user is not None: # mypy appeasement, always true\n fire_event(MainTrackingUserState(user_state=dbt.tracking.active_user.state()))\n\n results = None\n\n with track_run(task):\n results = task.run()\n return task, results\n\n\ndef _build_base_subparser():\n base_subparser = argparse.ArgumentParser(add_help=False)\n\n base_subparser.add_argument(\n \"--project-dir\",\n default=None,\n type=str,\n help=\"\"\"\n Which directory to look in for the dbt_project.yml file.\n Default is the current working directory and its parents.\n \"\"\",\n )\n\n 
base_subparser.add_argument(\n \"--profiles-dir\",\n default=None,\n dest=\"sub_profiles_dir\", # Main cli arg precedes subcommand\n type=str,\n help=\"\"\"\n Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/\n \"\"\",\n )\n\n base_subparser.add_argument(\n \"--profile\",\n required=False,\n type=str,\n help=\"\"\"\n Which profile to load. Overrides setting in dbt_project.yml.\n \"\"\",\n )\n\n base_subparser.add_argument(\n \"-t\",\n \"--target\",\n default=None,\n type=str,\n help=\"\"\"\n Which target to load for the given profile\n \"\"\",\n )\n\n base_subparser.add_argument(\n \"--vars\",\n type=str,\n default=\"{}\",\n help=\"\"\"\n Supply variables to the project. This argument overrides variables\n defined in your dbt_project.yml file. This argument should be a YAML\n string, eg. '{my_variable: my_value}'\n \"\"\",\n )\n\n # if set, log all cache events. This is extremely verbose!\n base_subparser.add_argument(\n \"--log-cache-events\",\n action=\"store_true\",\n help=argparse.SUPPRESS,\n )\n\n base_subparser.set_defaults(defer=None, state=None)\n return base_subparser\n\n\ndef _build_docs_subparser(subparsers, base_subparser):\n docs_sub = subparsers.add_parser(\n \"docs\",\n help=\"\"\"\n Generate or serve the documentation website for your project.\n \"\"\",\n )\n return docs_sub\n\n\ndef _build_source_subparser(subparsers, base_subparser):\n source_sub = subparsers.add_parser(\n \"source\",\n help=\"\"\"\n Manage your project's sources\n \"\"\",\n )\n return source_sub\n\n\ndef _build_init_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"init\",\n parents=[base_subparser],\n help=\"\"\"\n Initialize a new DBT project.\n \"\"\",\n )\n sub.add_argument(\n \"project_name\",\n nargs=\"?\",\n help=\"\"\"\n Name of the new DBT project.\n \"\"\",\n )\n sub.add_argument(\n \"-s\",\n \"--skip-profile-setup\",\n dest=\"skip_profile_setup\",\n action=\"store_true\",\n help=\"\"\"\n Skip interative profile setup.\n \"\"\",\n )\n sub.set_defaults(cls=init_task.InitTask, which=\"init\", rpc_method=None)\n return sub\n\n\ndef _build_build_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"build\",\n parents=[base_subparser],\n help=\"\"\"\n Run all Seeds, Models, Snapshots, and tests in DAG order\n \"\"\",\n )\n sub.set_defaults(cls=build_task.BuildTask, which=\"build\", rpc_method=\"build\")\n sub.add_argument(\n \"-x\",\n \"--fail-fast\",\n dest=\"sub_fail_fast\",\n action=\"store_true\",\n help=\"\"\"\n Stop execution upon a first failure.\n \"\"\",\n )\n sub.add_argument(\n \"--store-failures\",\n action=\"store_true\",\n help=\"\"\"\n Store test results (failing rows) in the database\n \"\"\",\n )\n sub.add_argument(\n \"--indirect-selection\",\n choices=[\"eager\", \"cautious\"],\n default=\"eager\",\n dest=\"indirect_selection\",\n help=\"\"\"\n Select all tests that are adjacent to selected resources,\n even if they those resources have been explicitly selected.\n \"\"\",\n )\n\n resource_values: List[str] = [str(s) for s in build_task.BuildTask.ALL_RESOURCE_VALUES] + [\n \"all\"\n ]\n sub.add_argument(\n \"--resource-type\",\n choices=resource_values,\n action=\"append\",\n default=[],\n dest=\"resource_types\",\n )\n # explicity don't support --models\n sub.add_argument(\n \"-s\",\n \"--select\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n )\n _add_common_selector_arguments(sub)\n return sub\n\n\ndef 
_build_clean_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"clean\",\n parents=[base_subparser],\n help=\"\"\"\n Delete all folders in the clean-targets list\n (usually the dbt_packages and target directories.)\n \"\"\",\n )\n sub.set_defaults(cls=clean_task.CleanTask, which=\"clean\", rpc_method=None)\n return sub\n\n\ndef _build_debug_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"debug\",\n parents=[base_subparser],\n help=\"\"\"\n Show some helpful information about dbt for debugging.\n\n Not to be confused with the --debug option which increases verbosity.\n \"\"\",\n )\n sub.add_argument(\n \"--config-dir\",\n action=\"store_true\",\n help=\"\"\"\n If specified, DBT will show path information for this project\n \"\"\",\n )\n _add_version_check(sub)\n sub.set_defaults(cls=debug_task.DebugTask, which=\"debug\", rpc_method=None)\n return sub\n\n\ndef _build_deps_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"deps\",\n parents=[base_subparser],\n help=\"\"\"\n Pull the most recent version of the dependencies listed in packages.yml\n \"\"\",\n )\n sub.set_defaults(cls=deps_task.DepsTask, which=\"deps\", rpc_method=\"deps\")\n return sub\n\n\ndef _build_snapshot_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"snapshot\",\n parents=[base_subparser],\n help=\"\"\"\n Execute snapshots defined in your project\n \"\"\",\n )\n sub.add_argument(\n \"--threads\",\n type=int,\n required=False,\n help=\"\"\"\n Specify number of threads to use while snapshotting tables.\n Overrides settings in profiles.yml.\n \"\"\",\n )\n sub.set_defaults(cls=snapshot_task.SnapshotTask, which=\"snapshot\", rpc_method=\"snapshot\")\n return sub\n\n\ndef _add_defer_argument(*subparsers):\n for sub in subparsers:\n sub.add_optional_argument_inverse(\n \"--defer\",\n enable_help=\"\"\"\n If set, defer to the state variable for resolving unselected nodes.\n \"\"\",\n disable_help=\"\"\"\n If set, do not defer to the state variable for resolving unselected\n nodes.\n \"\"\",\n default=flags.DEFER_MODE,\n )\n\n\ndef _build_run_subparser(subparsers, base_subparser):\n run_sub = subparsers.add_parser(\n \"run\",\n parents=[base_subparser],\n help=\"\"\"\n Compile SQL and execute against the current target database.\n \"\"\",\n )\n run_sub.add_argument(\n \"-x\",\n \"--fail-fast\",\n dest=\"sub_fail_fast\",\n action=\"store_true\",\n help=\"\"\"\n Stop execution upon a first failure.\n \"\"\",\n )\n\n run_sub.set_defaults(cls=run_task.RunTask, which=\"run\", rpc_method=\"run\")\n return run_sub\n\n\ndef _build_compile_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"compile\",\n parents=[base_subparser],\n help=\"\"\"\n Generates executable SQL from source, model, test, and analysis files.\n Compiled SQL files are written to the target/ directory.\n \"\"\",\n )\n sub.set_defaults(cls=compile_task.CompileTask, which=\"compile\", rpc_method=\"compile\")\n sub.add_argument(\"--parse-only\", action=\"store_true\")\n return sub\n\n\ndef _build_parse_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"parse\",\n parents=[base_subparser],\n help=\"\"\"\n Parses the project and provides information on performance\n \"\"\",\n )\n sub.set_defaults(cls=parse_task.ParseTask, which=\"parse\", rpc_method=\"parse\")\n sub.add_argument(\"--write-manifest\", action=\"store_true\")\n sub.add_argument(\"--compile\", action=\"store_true\")\n return sub\n\n\ndef 
_build_docs_generate_subparser(subparsers, base_subparser):\n # it might look like docs_sub is the correct parents entry, but that\n # will cause weird errors about 'conflicting option strings'.\n generate_sub = subparsers.add_parser(\"generate\", parents=[base_subparser])\n generate_sub.set_defaults(\n cls=generate_task.GenerateTask, which=\"generate\", rpc_method=\"docs.generate\"\n )\n generate_sub.add_argument(\n \"--no-compile\",\n action=\"store_false\",\n dest=\"compile\",\n help=\"\"\"\n Do not run \"dbt compile\" as part of docs generation\n \"\"\",\n )\n _add_defer_argument(generate_sub)\n return generate_sub\n\n\ndef _add_common_selector_arguments(sub):\n sub.add_argument(\n \"--exclude\",\n required=False,\n nargs=\"+\",\n help=\"\"\"\n Specify the models to exclude.\n \"\"\",\n )\n sub.add_argument(\n \"--selector\",\n dest=\"selector_name\",\n metavar=\"SELECTOR_NAME\",\n help=\"\"\"\n The selector name to use, as defined in selectors.yml\n \"\"\",\n )\n sub.add_argument(\n \"--state\",\n help=\"\"\"\n If set, use the given directory as the source for json files to\n compare with this project.\n \"\"\",\n type=Path,\n default=flags.ARTIFACT_STATE_PATH,\n )\n\n\ndef _add_selection_arguments(*subparsers):\n for sub in subparsers:\n sub.add_argument(\n \"-m\",\n \"--models\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n )\n sub.add_argument(\n \"-s\",\n \"--select\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n )\n _add_common_selector_arguments(sub)\n\n\ndef _add_table_mutability_arguments(*subparsers):\n for sub in subparsers:\n sub.add_argument(\n \"--full-refresh\",\n \"-f\",\n action=\"store_true\",\n help=\"\"\"\n If specified, dbt will drop incremental models and\n fully-recalculate the incremental table from the model definition.\n \"\"\",\n )\n\n\ndef _add_version_check(sub):\n sub.add_argument(\n \"--no-version-check\",\n dest=\"sub_version_check\", # main cli arg precedes subcommands\n action=\"store_false\",\n default=None,\n help=\"\"\"\n If set, skip ensuring dbt's version matches the one specified in\n the dbt_project.yml file ('require-dbt-version')\n \"\"\",\n )\n\n\ndef _add_common_arguments(*subparsers):\n for sub in subparsers:\n sub.add_argument(\n \"--threads\",\n type=int,\n required=False,\n help=\"\"\"\n Specify number of threads to use while executing models. Overrides\n settings in profiles.yml.\n \"\"\",\n )\n sub.add_argument(\n \"--target-path\",\n required=False,\n help=\"\"\"\n Configure the 'target-path'. Only applies this setting for the\n current run. Overrides the 'DBT_TARGET_PATH' if it is set.\n \"\"\",\n )\n sub.add_argument(\n \"--log-path\",\n required=False,\n help=\"\"\"\n Configure the 'log-path'. Only applies this setting for the\n current run. 
Overrides the 'DBT_LOG_PATH' if it is set.\n \"\"\",\n )\n _add_version_check(sub)\n\n\ndef _build_seed_subparser(subparsers, base_subparser):\n seed_sub = subparsers.add_parser(\n \"seed\",\n parents=[base_subparser],\n help=\"\"\"\n Load data from csv files into your data warehouse.\n \"\"\",\n )\n seed_sub.add_argument(\n \"--full-refresh\",\n action=\"store_true\",\n help=\"\"\"\n Drop existing seed tables and recreate them\n \"\"\",\n )\n seed_sub.add_argument(\n \"--show\",\n action=\"store_true\",\n help=\"\"\"\n Show a sample of the loaded data in the terminal\n \"\"\",\n )\n seed_sub.set_defaults(cls=seed_task.SeedTask, which=\"seed\", rpc_method=\"seed\")\n return seed_sub\n\n\ndef _build_docs_serve_subparser(subparsers, base_subparser):\n serve_sub = subparsers.add_parser(\"serve\", parents=[base_subparser])\n serve_sub.add_argument(\n \"--port\",\n default=8080,\n type=int,\n help=\"\"\"\n Specify the port number for the docs server.\n \"\"\",\n )\n serve_sub.add_argument(\n \"--no-browser\",\n dest=\"open_browser\",\n action=\"store_false\",\n )\n serve_sub.set_defaults(cls=serve_task.ServeTask, which=\"serve\", rpc_method=None)\n return serve_sub\n\n\ndef _build_test_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"test\",\n parents=[base_subparser],\n help=\"\"\"\n Runs tests on data in deployed models. Run this after `dbt run`\n \"\"\",\n )\n sub.add_argument(\n \"-x\",\n \"--fail-fast\",\n dest=\"sub_fail_fast\",\n action=\"store_true\",\n help=\"\"\"\n Stop execution upon a first test failure.\n \"\"\",\n )\n sub.add_argument(\n \"--store-failures\",\n action=\"store_true\",\n help=\"\"\"\n Store test results (failing rows) in the database\n \"\"\",\n )\n sub.add_argument(\n \"--indirect-selection\",\n choices=[\"eager\", \"cautious\"],\n default=\"eager\",\n dest=\"indirect_selection\",\n help=\"\"\"\n Select all tests that are adjacent to selected resources,\n even if they those resources have been explicitly selected.\n \"\"\",\n )\n\n sub.set_defaults(cls=test_task.TestTask, which=\"test\", rpc_method=\"test\")\n return sub\n\n\ndef _build_source_freshness_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"freshness\",\n parents=[base_subparser],\n help=\"\"\"\n Snapshots the current freshness of the project's sources\n \"\"\",\n aliases=[\"snapshot-freshness\"],\n )\n sub.add_argument(\n \"-o\",\n \"--output\",\n required=False,\n help=\"\"\"\n Specify the output path for the json report. By default, outputs to\n target/sources.json\n \"\"\",\n )\n sub.add_argument(\n \"--threads\",\n type=int,\n required=False,\n help=\"\"\"\n Specify number of threads to use. 
Overrides settings in profiles.yml\n \"\"\",\n )\n sub.set_defaults(\n cls=freshness_task.FreshnessTask,\n which=\"source-freshness\",\n rpc_method=\"source-freshness\",\n )\n sub.add_argument(\n \"-s\",\n \"--select\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n )\n _add_common_selector_arguments(sub)\n return sub\n\n\ndef _build_list_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"list\",\n parents=[base_subparser],\n help=\"\"\"\n List the resources in your project\n \"\"\",\n aliases=[\"ls\"],\n )\n sub.set_defaults(cls=list_task.ListTask, which=\"list\", rpc_method=None)\n resource_values: List[str] = [str(s) for s in list_task.ListTask.ALL_RESOURCE_VALUES] + [\n \"default\",\n \"all\",\n ]\n sub.add_argument(\n \"--resource-type\",\n choices=resource_values,\n action=\"append\",\n default=[],\n dest=\"resource_types\",\n )\n sub.add_argument(\"--output\", choices=[\"json\", \"name\", \"path\", \"selector\"], default=\"selector\")\n sub.add_argument(\"--output-keys\")\n\n sub.add_argument(\n \"-m\",\n \"--models\",\n dest=\"models\",\n nargs=\"+\",\n help=\"\"\"\n Specify the models to select and set the resource-type to 'model'.\n Mutually exclusive with '--select' (or '-s') and '--resource-type'\n \"\"\",\n metavar=\"SELECTOR\",\n required=False,\n )\n sub.add_argument(\n \"-s\",\n \"--select\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n metavar=\"SELECTOR\",\n required=False,\n )\n sub.add_argument(\n \"--indirect-selection\",\n choices=[\"eager\", \"cautious\"],\n default=\"eager\",\n dest=\"indirect_selection\",\n help=\"\"\"\n Select all tests that are adjacent to selected resources,\n even if they those resources have been explicitly selected.\n \"\"\",\n )\n _add_common_selector_arguments(sub)\n\n return sub\n\n\ndef _build_run_operation_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"run-operation\",\n parents=[base_subparser],\n help=\"\"\"\n Run the named macro with any supplied arguments.\n \"\"\",\n )\n sub.add_argument(\n \"macro\",\n help=\"\"\"\n Specify the macro to invoke. dbt will call this macro with the supplied\n arguments and then exit\n \"\"\",\n )\n sub.add_argument(\n \"--args\",\n type=str,\n default=\"{}\",\n help=\"\"\"\n Supply arguments to the macro. This dictionary will be mapped to the\n keyword arguments defined in the selected macro. This argument should\n be a YAML string, eg. '{my_variable: my_value}'\n \"\"\",\n )\n sub.set_defaults(\n cls=run_operation_task.RunOperationTask, which=\"run-operation\", rpc_method=\"run-operation\"\n )\n return sub\n\n\ndef parse_args(args, cls=DBTArgumentParser):\n p = cls(\n prog=\"dbt\",\n description=\"\"\"\n An ELT tool for managing your SQL transformations and data models.\n For more documentation on these commands, visit: docs.getdbt.com\n \"\"\",\n epilog=\"\"\"\n Specify one of these sub-commands and you can find more help from\n there.\n \"\"\",\n )\n\n p.add_argument(\n \"--version\",\n action=\"dbtversion\",\n help=\"\"\"\n Show version information\n \"\"\",\n )\n\n p.add_argument(\n \"-r\",\n \"--record-timing-info\",\n default=None,\n type=str,\n help=\"\"\"\n When this option is passed, dbt will output low-level timing stats to\n the specified file. Example: `--record-timing-info output.profile`\n \"\"\",\n )\n\n p.add_argument(\n \"-d\",\n \"--debug\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Display debug logging during dbt execution. 
Useful for debugging and\n making bug reports.\n \"\"\",\n )\n\n p.add_argument(\n \"--log-format\",\n choices=[\"text\", \"json\", \"default\"],\n default=None,\n help=\"\"\"Specify the log format, overriding the command's default.\"\"\",\n )\n\n p.add_argument(\n \"--no-write-json\",\n action=\"store_false\",\n default=None,\n dest=\"write_json\",\n help=\"\"\"\n If set, skip writing the manifest and run_results.json files to disk\n \"\"\",\n )\n colors_flag = p.add_mutually_exclusive_group()\n colors_flag.add_argument(\n \"--use-colors\",\n action=\"store_const\",\n const=True,\n default=None,\n dest=\"use_colors\",\n help=\"\"\"\n Colorize the output DBT prints to the terminal. Output is colorized by\n default and may also be set in a profile or at the command line.\n Mutually exclusive with --no-use-colors\n \"\"\",\n )\n colors_flag.add_argument(\n \"--no-use-colors\",\n action=\"store_const\",\n const=False,\n dest=\"use_colors\",\n help=\"\"\"\n Do not colorize the output DBT prints to the terminal. Output is\n colorized by default and may also be set in a profile or at the\n command line.\n Mutually exclusive with --use-colors\n \"\"\",\n )\n\n p.add_argument(\n \"--printer-width\",\n dest=\"printer_width\",\n help=\"\"\"\n Sets the width of terminal output\n \"\"\",\n )\n\n p.add_argument(\n \"--warn-error\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n If dbt would normally warn, instead raise an exception. Examples\n include --models that selects nothing, deprecations, configurations\n with no associated models, invalid test configurations, and missing\n sources/refs in tests.\n \"\"\",\n )\n\n p.add_argument(\n \"--no-version-check\",\n dest=\"version_check\",\n action=\"store_false\",\n default=None,\n help=\"\"\"\n If set, skip ensuring dbt's version matches the one specified in\n the dbt_project.yml file ('require-dbt-version')\n \"\"\",\n )\n\n p.add_optional_argument_inverse(\n \"--partial-parse\",\n enable_help=\"\"\"\n Allow for partial parsing by looking for and writing to a pickle file\n in the target directory. This overrides the user configuration file.\n \"\"\",\n disable_help=\"\"\"\n Disallow partial parsing. This overrides the user configuration file.\n \"\"\",\n )\n\n # if set, run dbt in single-threaded mode: thread count is ignored, and\n # calls go through `map` instead of the thread pool. This is useful for\n # getting performance information about aspects of dbt that normally run in\n # a thread, as the profiler ignores child threads. Users should really\n # never use this.\n p.add_argument(\n \"--single-threaded\",\n action=\"store_true\",\n help=argparse.SUPPRESS,\n )\n\n # if set, will use the latest features from the static parser instead of\n # the stable static parser.\n p.add_argument(\n \"--use-experimental-parser\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Enables experimental parsing features.\n \"\"\",\n )\n\n # if set, will disable the use of the stable static parser and instead\n # always rely on jinja rendering.\n p.add_argument(\n \"--no-static-parser\",\n default=None,\n dest=\"static_parser\",\n action=\"store_false\",\n help=\"\"\"\n Disables the static parser.\n \"\"\",\n )\n\n p.add_argument(\n \"--profiles-dir\",\n default=None,\n dest=\"profiles_dir\",\n type=str,\n help=\"\"\"\n Which directory to look in for the profiles.yml file. 
If not set, dbt will look in the current working directory first, then HOME/.dbt/\n \"\"\",\n )\n\n p.add_argument(\n \"--no-anonymous-usage-stats\",\n action=\"store_false\",\n default=None,\n dest=\"send_anonymous_usage_stats\",\n help=\"\"\"\n Do not send anonymous usage stat to dbt Labs\n \"\"\",\n )\n\n p.add_argument(\n \"-x\",\n \"--fail-fast\",\n dest=\"fail_fast\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Stop execution upon a first failure.\n \"\"\",\n )\n\n p.add_argument(\n \"--event-buffer-size\",\n dest=\"event_buffer_size\",\n help=\"\"\"\n Sets the max number of events to buffer in EVENT_HISTORY\n \"\"\",\n )\n\n p.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Suppress all non-error logging to stdout. Does not affect\n {{ print() }} macro calls.\n \"\"\",\n )\n\n p.add_argument(\n \"--no-print\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Suppress all {{ print() }} macro calls.\n \"\"\",\n )\n\n schema_cache_flag = p.add_mutually_exclusive_group()\n schema_cache_flag.add_argument(\n \"--cache-selected-only\",\n action=\"store_const\",\n const=True,\n default=None,\n dest=\"cache_selected_only\",\n help=\"\"\"\n Pre cache database objects relevant to selected resource only.\n \"\"\",\n )\n schema_cache_flag.add_argument(\n \"--no-cache-selected-only\",\n action=\"store_const\",\n const=False,\n dest=\"cache_selected_only\",\n help=\"\"\"\n Pre cache all database objects related to the project.\n \"\"\",\n )\n\n subs = p.add_subparsers(title=\"Available sub-commands\")\n\n base_subparser = _build_base_subparser()\n\n # make the subcommands that have their own subcommands\n docs_sub = _build_docs_subparser(subs, base_subparser)\n docs_subs = docs_sub.add_subparsers(title=\"Available sub-commands\")\n source_sub = _build_source_subparser(subs, base_subparser)\n source_subs = source_sub.add_subparsers(title=\"Available sub-commands\")\n\n _build_init_subparser(subs, base_subparser)\n _build_clean_subparser(subs, base_subparser)\n _build_debug_subparser(subs, base_subparser)\n _build_deps_subparser(subs, base_subparser)\n _build_list_subparser(subs, base_subparser)\n\n build_sub = _build_build_subparser(subs, base_subparser)\n snapshot_sub = _build_snapshot_subparser(subs, base_subparser)\n run_sub = _build_run_subparser(subs, base_subparser)\n compile_sub = _build_compile_subparser(subs, base_subparser)\n parse_sub = _build_parse_subparser(subs, base_subparser)\n generate_sub = _build_docs_generate_subparser(docs_subs, base_subparser)\n test_sub = _build_test_subparser(subs, base_subparser)\n seed_sub = _build_seed_subparser(subs, base_subparser)\n # --threads, --no-version-check\n _add_common_arguments(\n run_sub, compile_sub, generate_sub, test_sub, seed_sub, parse_sub, build_sub\n )\n # --select, --exclude\n # list_sub sets up its own arguments.\n _add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub)\n # --defer\n _add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub, compile_sub)\n # --full-refresh\n _add_table_mutability_arguments(run_sub, compile_sub, build_sub)\n\n _build_docs_serve_subparser(docs_subs, base_subparser)\n _build_source_freshness_subparser(source_subs, base_subparser)\n _build_run_operation_subparser(subs, base_subparser)\n\n if len(args) == 0:\n p.print_help()\n sys.exit(1)\n\n parsed = p.parse_args(args)\n\n # profiles_dir is set before subcommands and after, so normalize\n if hasattr(parsed, \"sub_profiles_dir\"):\n if 
parsed.sub_profiles_dir is not None:\n parsed.profiles_dir = parsed.sub_profiles_dir\n delattr(parsed, \"sub_profiles_dir\")\n if hasattr(parsed, \"profiles_dir\"):\n if parsed.profiles_dir is None:\n parsed.profiles_dir = flags.PROFILES_DIR\n else:\n parsed.profiles_dir = os.path.abspath(parsed.profiles_dir)\n # needs to be set before the other flags, because it's needed to\n # read the profile that contains them\n flags.PROFILES_DIR = parsed.profiles_dir\n\n # version_check is set before subcommands and after, so normalize\n if hasattr(parsed, \"sub_version_check\"):\n if parsed.sub_version_check is False:\n parsed.version_check = False\n delattr(parsed, \"sub_version_check\")\n\n # fail_fast is set before subcommands and after, so normalize\n if hasattr(parsed, \"sub_fail_fast\"):\n if parsed.sub_fail_fast is True:\n parsed.fail_fast = True\n delattr(parsed, \"sub_fail_fast\")\n\n if getattr(parsed, \"project_dir\", None) is not None:\n expanded_user = os.path.expanduser(parsed.project_dir)\n parsed.project_dir = os.path.abspath(expanded_user)\n\n if not hasattr(parsed, \"which\"):\n # the user did not provide a valid subcommand. trigger the help message\n # and exit with a error\n p.print_help()\n p.exit(1)\n\n return parsed\n", "path": "core/dbt/main.py" } ]
[ { "content": "from typing import List\n\nfrom dbt.logger import log_cache_events, log_manager\n\nimport argparse\nimport os.path\nimport sys\nimport traceback\nimport warnings\nfrom contextlib import contextmanager\nfrom pathlib import Path\n\nimport dbt.version\nfrom dbt.events.functions import fire_event, setup_event_logger\nfrom dbt.events.types import (\n MainEncounteredError,\n MainKeyboardInterrupt,\n MainReportVersion,\n MainReportArgs,\n MainTrackingUserState,\n MainStackTrace,\n)\nimport dbt.flags as flags\nimport dbt.task.build as build_task\nimport dbt.task.clean as clean_task\nimport dbt.task.compile as compile_task\nimport dbt.task.debug as debug_task\nimport dbt.task.deps as deps_task\nimport dbt.task.freshness as freshness_task\nimport dbt.task.generate as generate_task\nimport dbt.task.init as init_task\nimport dbt.task.list as list_task\nimport dbt.task.parse as parse_task\nimport dbt.task.run as run_task\nimport dbt.task.run_operation as run_operation_task\nimport dbt.task.seed as seed_task\nimport dbt.task.serve as serve_task\nimport dbt.task.snapshot as snapshot_task\nimport dbt.task.test as test_task\nfrom dbt.profiler import profiler\nfrom dbt.adapters.factory import reset_adapters, cleanup_connections\n\nimport dbt.tracking\n\nfrom dbt.utils import ExitCodes, args_to_dict\nfrom dbt.config.profile import read_user_config\nfrom dbt.exceptions import (\n Exception as dbtException,\n InternalException,\n NotImplementedException,\n FailedToConnectException,\n)\n\n\nclass DBTVersion(argparse.Action):\n \"\"\"This is very similar to the built-in argparse._Version action,\n except it just calls dbt.version.get_version_information().\n \"\"\"\n\n def __init__(\n self,\n option_strings,\n version=None,\n dest=argparse.SUPPRESS,\n default=argparse.SUPPRESS,\n help=\"show program's version number and exit\",\n ):\n super().__init__(\n option_strings=option_strings, dest=dest, default=default, nargs=0, help=help\n )\n\n def __call__(self, parser, namespace, values, option_string=None):\n formatter = argparse.RawTextHelpFormatter(prog=parser.prog)\n formatter.add_text(dbt.version.get_version_information())\n parser.exit(message=formatter.format_help())\n\n\nclass DBTArgumentParser(argparse.ArgumentParser):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.register(\"action\", \"dbtversion\", DBTVersion)\n\n def add_optional_argument_inverse(\n self,\n name,\n *,\n enable_help=None,\n disable_help=None,\n dest=None,\n no_name=None,\n default=None,\n ):\n mutex_group = self.add_mutually_exclusive_group()\n if not name.startswith(\"--\"):\n raise InternalException(\n 'cannot handle optional argument without \"--\" prefix: ' f'got \"{name}\"'\n )\n if dest is None:\n dest_name = name[2:].replace(\"-\", \"_\")\n else:\n dest_name = dest\n\n if no_name is None:\n no_name = f\"--no-{name[2:]}\"\n\n mutex_group.add_argument(\n name,\n action=\"store_const\",\n const=True,\n dest=dest_name,\n default=default,\n help=enable_help,\n )\n\n mutex_group.add_argument(\n f\"--no-{name[2:]}\",\n action=\"store_const\",\n const=False,\n dest=dest_name,\n default=default,\n help=disable_help,\n )\n\n return mutex_group\n\n\ndef main(args=None):\n # Logbook warnings are ignored so we don't have to fork logbook to support python 3.10.\n # This _only_ works for regular cli invocations.\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning, module=\"logbook\")\n if args is None:\n args = sys.argv[1:]\n with log_manager.applicationbound():\n try:\n results, 
succeeded = handle_and_check(args)\n if succeeded:\n exit_code = ExitCodes.Success.value\n else:\n exit_code = ExitCodes.ModelError.value\n\n except KeyboardInterrupt:\n # if the logger isn't configured yet, it will use the default logger\n fire_event(MainKeyboardInterrupt())\n exit_code = ExitCodes.UnhandledError.value\n\n # This can be thrown by eg. argparse\n except SystemExit as e:\n exit_code = e.code\n\n except BaseException as e:\n fire_event(MainEncounteredError(exc=str(e)))\n if not isinstance(e, dbtException):\n fire_event(MainStackTrace(stack_trace=traceback.format_exc()))\n exit_code = ExitCodes.UnhandledError.value\n\n sys.exit(exit_code)\n\n\n# here for backwards compatibility\ndef handle(args):\n res, success = handle_and_check(args)\n return res\n\n\n@contextmanager\ndef adapter_management():\n reset_adapters()\n try:\n yield\n finally:\n cleanup_connections()\n\n\ndef handle_and_check(args):\n with log_manager.applicationbound():\n parsed = parse_args(args)\n\n # Set flags from args, user config, and env vars\n user_config = read_user_config(flags.PROFILES_DIR) # This is read again later\n flags.set_from_args(parsed, user_config)\n dbt.tracking.initialize_from_flags()\n # Set log_format from flags\n parsed.cls.set_log_format()\n\n # we've parsed the args and set the flags - we can now decide if we're debug or not\n if flags.DEBUG:\n log_manager.set_debug()\n\n profiler_enabled = False\n\n if parsed.record_timing_info:\n profiler_enabled = True\n\n with profiler(enable=profiler_enabled, outfile=parsed.record_timing_info):\n\n with adapter_management():\n\n task, res = run_from_args(parsed)\n success = task.interpret_results(res)\n\n return res, success\n\n\n@contextmanager\ndef track_run(task):\n dbt.tracking.track_invocation_start(config=task.config, args=task.args)\n try:\n yield\n dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type=\"ok\")\n except (NotImplementedException, FailedToConnectException) as e:\n fire_event(MainEncounteredError(exc=str(e)))\n dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type=\"error\")\n except Exception:\n dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type=\"error\")\n raise\n finally:\n dbt.tracking.flush()\n\n\ndef run_from_args(parsed):\n log_cache_events(getattr(parsed, \"log_cache_events\", False))\n\n # this will convert DbtConfigErrors into RuntimeExceptions\n # task could be any one of the task objects\n task = parsed.cls.from_args(args=parsed)\n\n # Set up logging\n log_path = None\n if task.config is not None:\n log_path = getattr(task.config, \"log_path\", None)\n log_manager.set_path(log_path)\n # if 'list' task: set stdout to WARN instead of INFO\n level_override = parsed.cls.pre_init_hook(parsed)\n setup_event_logger(log_path or \"logs\", level_override)\n\n fire_event(MainReportVersion(v=str(dbt.version.installed)))\n fire_event(MainReportArgs(args=args_to_dict(parsed)))\n\n if dbt.tracking.active_user is not None: # mypy appeasement, always true\n fire_event(MainTrackingUserState(user_state=dbt.tracking.active_user.state()))\n\n results = None\n\n with track_run(task):\n results = task.run()\n return task, results\n\n\ndef _build_base_subparser():\n base_subparser = argparse.ArgumentParser(add_help=False)\n\n base_subparser.add_argument(\n \"--project-dir\",\n default=None,\n type=str,\n help=\"\"\"\n Which directory to look in for the dbt_project.yml file.\n Default is the current working directory and its parents.\n \"\"\",\n )\n\n 
base_subparser.add_argument(\n \"--profiles-dir\",\n default=None,\n dest=\"sub_profiles_dir\", # Main cli arg precedes subcommand\n type=str,\n help=\"\"\"\n Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/\n \"\"\",\n )\n\n base_subparser.add_argument(\n \"--profile\",\n required=False,\n type=str,\n help=\"\"\"\n Which profile to load. Overrides setting in dbt_project.yml.\n \"\"\",\n )\n\n base_subparser.add_argument(\n \"-t\",\n \"--target\",\n default=None,\n type=str,\n help=\"\"\"\n Which target to load for the given profile\n \"\"\",\n )\n\n base_subparser.add_argument(\n \"--vars\",\n type=str,\n default=\"{}\",\n help=\"\"\"\n Supply variables to the project. This argument overrides variables\n defined in your dbt_project.yml file. This argument should be a YAML\n string, eg. '{my_variable: my_value}'\n \"\"\",\n )\n\n # if set, log all cache events. This is extremely verbose!\n base_subparser.add_argument(\n \"--log-cache-events\",\n action=\"store_true\",\n help=argparse.SUPPRESS,\n )\n\n base_subparser.set_defaults(defer=None, state=None)\n return base_subparser\n\n\ndef _build_docs_subparser(subparsers, base_subparser):\n docs_sub = subparsers.add_parser(\n \"docs\",\n help=\"\"\"\n Generate or serve the documentation website for your project.\n \"\"\",\n )\n return docs_sub\n\n\ndef _build_source_subparser(subparsers, base_subparser):\n source_sub = subparsers.add_parser(\n \"source\",\n help=\"\"\"\n Manage your project's sources\n \"\"\",\n )\n return source_sub\n\n\ndef _build_init_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"init\",\n parents=[base_subparser],\n help=\"\"\"\n Initialize a new DBT project.\n \"\"\",\n )\n sub.add_argument(\n \"project_name\",\n nargs=\"?\",\n help=\"\"\"\n Name of the new DBT project.\n \"\"\",\n )\n sub.add_argument(\n \"-s\",\n \"--skip-profile-setup\",\n dest=\"skip_profile_setup\",\n action=\"store_true\",\n help=\"\"\"\n Skip interative profile setup.\n \"\"\",\n )\n sub.set_defaults(cls=init_task.InitTask, which=\"init\", rpc_method=None)\n return sub\n\n\ndef _build_build_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"build\",\n parents=[base_subparser],\n help=\"\"\"\n Run all Seeds, Models, Snapshots, and tests in DAG order\n \"\"\",\n )\n sub.set_defaults(cls=build_task.BuildTask, which=\"build\", rpc_method=\"build\")\n sub.add_argument(\n \"-x\",\n \"--fail-fast\",\n dest=\"sub_fail_fast\",\n action=\"store_true\",\n help=\"\"\"\n Stop execution upon a first failure.\n \"\"\",\n )\n sub.add_argument(\n \"--store-failures\",\n action=\"store_true\",\n help=\"\"\"\n Store test results (failing rows) in the database\n \"\"\",\n )\n sub.add_argument(\n \"--indirect-selection\",\n choices=[\"eager\", \"cautious\"],\n default=\"eager\",\n dest=\"indirect_selection\",\n help=\"\"\"\n Select all tests that are adjacent to selected resources,\n even if they those resources have been explicitly selected.\n \"\"\",\n )\n\n resource_values: List[str] = [str(s) for s in build_task.BuildTask.ALL_RESOURCE_VALUES] + [\n \"all\"\n ]\n sub.add_argument(\n \"--resource-type\",\n choices=resource_values,\n action=\"append\",\n default=[],\n dest=\"resource_types\",\n )\n # explicity don't support --models\n sub.add_argument(\n \"-s\",\n \"--select\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n )\n _add_common_selector_arguments(sub)\n return sub\n\n\ndef 
_build_clean_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"clean\",\n parents=[base_subparser],\n help=\"\"\"\n Delete all folders in the clean-targets list\n (usually the dbt_packages and target directories.)\n \"\"\",\n )\n sub.set_defaults(cls=clean_task.CleanTask, which=\"clean\", rpc_method=None)\n return sub\n\n\ndef _build_debug_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"debug\",\n parents=[base_subparser],\n help=\"\"\"\n Show some helpful information about dbt for debugging.\n\n Not to be confused with the --debug option which increases verbosity.\n \"\"\",\n )\n sub.add_argument(\n \"--config-dir\",\n action=\"store_true\",\n help=\"\"\"\n If specified, DBT will show path information for this project\n \"\"\",\n )\n _add_version_check(sub)\n sub.set_defaults(cls=debug_task.DebugTask, which=\"debug\", rpc_method=None)\n return sub\n\n\ndef _build_deps_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"deps\",\n parents=[base_subparser],\n help=\"\"\"\n Pull the most recent version of the dependencies listed in packages.yml\n \"\"\",\n )\n sub.set_defaults(cls=deps_task.DepsTask, which=\"deps\", rpc_method=\"deps\")\n return sub\n\n\ndef _build_snapshot_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"snapshot\",\n parents=[base_subparser],\n help=\"\"\"\n Execute snapshots defined in your project\n \"\"\",\n )\n sub.add_argument(\n \"--threads\",\n type=int,\n required=False,\n help=\"\"\"\n Specify number of threads to use while snapshotting tables.\n Overrides settings in profiles.yml.\n \"\"\",\n )\n sub.set_defaults(cls=snapshot_task.SnapshotTask, which=\"snapshot\", rpc_method=\"snapshot\")\n return sub\n\n\ndef _add_defer_argument(*subparsers):\n for sub in subparsers:\n sub.add_optional_argument_inverse(\n \"--defer\",\n enable_help=\"\"\"\n If set, defer to the state variable for resolving unselected nodes.\n \"\"\",\n disable_help=\"\"\"\n If set, do not defer to the state variable for resolving unselected\n nodes.\n \"\"\",\n default=flags.DEFER_MODE,\n )\n\n\ndef _build_run_subparser(subparsers, base_subparser):\n run_sub = subparsers.add_parser(\n \"run\",\n parents=[base_subparser],\n help=\"\"\"\n Compile SQL and execute against the current target database.\n \"\"\",\n )\n run_sub.add_argument(\n \"-x\",\n \"--fail-fast\",\n dest=\"sub_fail_fast\",\n action=\"store_true\",\n help=\"\"\"\n Stop execution upon a first failure.\n \"\"\",\n )\n\n run_sub.set_defaults(cls=run_task.RunTask, which=\"run\", rpc_method=\"run\")\n return run_sub\n\n\ndef _build_compile_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"compile\",\n parents=[base_subparser],\n help=\"\"\"\n Generates executable SQL from source, model, test, and analysis files.\n Compiled SQL files are written to the target/ directory.\n \"\"\",\n )\n sub.set_defaults(cls=compile_task.CompileTask, which=\"compile\", rpc_method=\"compile\")\n sub.add_argument(\"--parse-only\", action=\"store_true\")\n return sub\n\n\ndef _build_parse_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"parse\",\n parents=[base_subparser],\n help=\"\"\"\n Parses the project and provides information on performance\n \"\"\",\n )\n sub.set_defaults(cls=parse_task.ParseTask, which=\"parse\", rpc_method=\"parse\")\n sub.add_argument(\"--write-manifest\", action=\"store_true\")\n sub.add_argument(\"--compile\", action=\"store_true\")\n return sub\n\n\ndef 
_build_docs_generate_subparser(subparsers, base_subparser):\n # it might look like docs_sub is the correct parents entry, but that\n # will cause weird errors about 'conflicting option strings'.\n generate_sub = subparsers.add_parser(\"generate\", parents=[base_subparser])\n generate_sub.set_defaults(\n cls=generate_task.GenerateTask, which=\"generate\", rpc_method=\"docs.generate\"\n )\n generate_sub.add_argument(\n \"--no-compile\",\n action=\"store_false\",\n dest=\"compile\",\n help=\"\"\"\n Do not run \"dbt compile\" as part of docs generation\n \"\"\",\n )\n _add_defer_argument(generate_sub)\n return generate_sub\n\n\ndef _add_common_selector_arguments(sub):\n sub.add_argument(\n \"--exclude\",\n required=False,\n nargs=\"+\",\n help=\"\"\"\n Specify the models to exclude.\n \"\"\",\n )\n sub.add_argument(\n \"--selector\",\n dest=\"selector_name\",\n metavar=\"SELECTOR_NAME\",\n help=\"\"\"\n The selector name to use, as defined in selectors.yml\n \"\"\",\n )\n sub.add_argument(\n \"--state\",\n help=\"\"\"\n If set, use the given directory as the source for json files to\n compare with this project.\n \"\"\",\n type=Path,\n default=flags.ARTIFACT_STATE_PATH,\n )\n\n\ndef _add_selection_arguments(*subparsers):\n for sub in subparsers:\n sub.add_argument(\n \"-m\",\n \"--models\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n )\n sub.add_argument(\n \"-s\",\n \"--select\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n )\n _add_common_selector_arguments(sub)\n\n\ndef _add_table_mutability_arguments(*subparsers):\n for sub in subparsers:\n sub.add_argument(\n \"--full-refresh\",\n \"-f\",\n action=\"store_true\",\n help=\"\"\"\n If specified, dbt will drop incremental models and\n fully-recalculate the incremental table from the model definition.\n \"\"\",\n )\n\n\ndef _add_version_check(sub):\n sub.add_argument(\n \"--no-version-check\",\n dest=\"sub_version_check\", # main cli arg precedes subcommands\n action=\"store_false\",\n default=None,\n help=\"\"\"\n If set, skip ensuring dbt's version matches the one specified in\n the dbt_project.yml file ('require-dbt-version')\n \"\"\",\n )\n\n\ndef _add_common_arguments(*subparsers):\n for sub in subparsers:\n sub.add_argument(\n \"--threads\",\n type=int,\n required=False,\n help=\"\"\"\n Specify number of threads to use while executing models. Overrides\n settings in profiles.yml.\n \"\"\",\n )\n sub.add_argument(\n \"--target-path\",\n required=False,\n help=\"\"\"\n Configure the 'target-path'. Only applies this setting for the\n current run. Overrides the 'DBT_TARGET_PATH' if it is set.\n \"\"\",\n )\n sub.add_argument(\n \"--log-path\",\n required=False,\n help=\"\"\"\n Configure the 'log-path'. Only applies this setting for the\n current run. 
Overrides the 'DBT_LOG_PATH' if it is set.\n \"\"\",\n )\n _add_version_check(sub)\n\n\ndef _build_seed_subparser(subparsers, base_subparser):\n seed_sub = subparsers.add_parser(\n \"seed\",\n parents=[base_subparser],\n help=\"\"\"\n Load data from csv files into your data warehouse.\n \"\"\",\n )\n seed_sub.add_argument(\n \"--full-refresh\",\n \"-f\",\n action=\"store_true\",\n help=\"\"\"\n Drop existing seed tables and recreate them\n \"\"\",\n )\n seed_sub.add_argument(\n \"--show\",\n action=\"store_true\",\n help=\"\"\"\n Show a sample of the loaded data in the terminal\n \"\"\",\n )\n seed_sub.set_defaults(cls=seed_task.SeedTask, which=\"seed\", rpc_method=\"seed\")\n return seed_sub\n\n\ndef _build_docs_serve_subparser(subparsers, base_subparser):\n serve_sub = subparsers.add_parser(\"serve\", parents=[base_subparser])\n serve_sub.add_argument(\n \"--port\",\n default=8080,\n type=int,\n help=\"\"\"\n Specify the port number for the docs server.\n \"\"\",\n )\n serve_sub.add_argument(\n \"--no-browser\",\n dest=\"open_browser\",\n action=\"store_false\",\n )\n serve_sub.set_defaults(cls=serve_task.ServeTask, which=\"serve\", rpc_method=None)\n return serve_sub\n\n\ndef _build_test_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"test\",\n parents=[base_subparser],\n help=\"\"\"\n Runs tests on data in deployed models. Run this after `dbt run`\n \"\"\",\n )\n sub.add_argument(\n \"-x\",\n \"--fail-fast\",\n dest=\"sub_fail_fast\",\n action=\"store_true\",\n help=\"\"\"\n Stop execution upon a first test failure.\n \"\"\",\n )\n sub.add_argument(\n \"--store-failures\",\n action=\"store_true\",\n help=\"\"\"\n Store test results (failing rows) in the database\n \"\"\",\n )\n sub.add_argument(\n \"--indirect-selection\",\n choices=[\"eager\", \"cautious\"],\n default=\"eager\",\n dest=\"indirect_selection\",\n help=\"\"\"\n Select all tests that are adjacent to selected resources,\n even if they those resources have been explicitly selected.\n \"\"\",\n )\n\n sub.set_defaults(cls=test_task.TestTask, which=\"test\", rpc_method=\"test\")\n return sub\n\n\ndef _build_source_freshness_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"freshness\",\n parents=[base_subparser],\n help=\"\"\"\n Snapshots the current freshness of the project's sources\n \"\"\",\n aliases=[\"snapshot-freshness\"],\n )\n sub.add_argument(\n \"-o\",\n \"--output\",\n required=False,\n help=\"\"\"\n Specify the output path for the json report. By default, outputs to\n target/sources.json\n \"\"\",\n )\n sub.add_argument(\n \"--threads\",\n type=int,\n required=False,\n help=\"\"\"\n Specify number of threads to use. 
Overrides settings in profiles.yml\n \"\"\",\n )\n sub.set_defaults(\n cls=freshness_task.FreshnessTask,\n which=\"source-freshness\",\n rpc_method=\"source-freshness\",\n )\n sub.add_argument(\n \"-s\",\n \"--select\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n )\n _add_common_selector_arguments(sub)\n return sub\n\n\ndef _build_list_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"list\",\n parents=[base_subparser],\n help=\"\"\"\n List the resources in your project\n \"\"\",\n aliases=[\"ls\"],\n )\n sub.set_defaults(cls=list_task.ListTask, which=\"list\", rpc_method=None)\n resource_values: List[str] = [str(s) for s in list_task.ListTask.ALL_RESOURCE_VALUES] + [\n \"default\",\n \"all\",\n ]\n sub.add_argument(\n \"--resource-type\",\n choices=resource_values,\n action=\"append\",\n default=[],\n dest=\"resource_types\",\n )\n sub.add_argument(\"--output\", choices=[\"json\", \"name\", \"path\", \"selector\"], default=\"selector\")\n sub.add_argument(\"--output-keys\")\n\n sub.add_argument(\n \"-m\",\n \"--models\",\n dest=\"models\",\n nargs=\"+\",\n help=\"\"\"\n Specify the models to select and set the resource-type to 'model'.\n Mutually exclusive with '--select' (or '-s') and '--resource-type'\n \"\"\",\n metavar=\"SELECTOR\",\n required=False,\n )\n sub.add_argument(\n \"-s\",\n \"--select\",\n dest=\"select\",\n nargs=\"+\",\n help=\"\"\"\n Specify the nodes to include.\n \"\"\",\n metavar=\"SELECTOR\",\n required=False,\n )\n sub.add_argument(\n \"--indirect-selection\",\n choices=[\"eager\", \"cautious\"],\n default=\"eager\",\n dest=\"indirect_selection\",\n help=\"\"\"\n Select all tests that are adjacent to selected resources,\n even if they those resources have been explicitly selected.\n \"\"\",\n )\n _add_common_selector_arguments(sub)\n\n return sub\n\n\ndef _build_run_operation_subparser(subparsers, base_subparser):\n sub = subparsers.add_parser(\n \"run-operation\",\n parents=[base_subparser],\n help=\"\"\"\n Run the named macro with any supplied arguments.\n \"\"\",\n )\n sub.add_argument(\n \"macro\",\n help=\"\"\"\n Specify the macro to invoke. dbt will call this macro with the supplied\n arguments and then exit\n \"\"\",\n )\n sub.add_argument(\n \"--args\",\n type=str,\n default=\"{}\",\n help=\"\"\"\n Supply arguments to the macro. This dictionary will be mapped to the\n keyword arguments defined in the selected macro. This argument should\n be a YAML string, eg. '{my_variable: my_value}'\n \"\"\",\n )\n sub.set_defaults(\n cls=run_operation_task.RunOperationTask, which=\"run-operation\", rpc_method=\"run-operation\"\n )\n return sub\n\n\ndef parse_args(args, cls=DBTArgumentParser):\n p = cls(\n prog=\"dbt\",\n description=\"\"\"\n An ELT tool for managing your SQL transformations and data models.\n For more documentation on these commands, visit: docs.getdbt.com\n \"\"\",\n epilog=\"\"\"\n Specify one of these sub-commands and you can find more help from\n there.\n \"\"\",\n )\n\n p.add_argument(\n \"--version\",\n action=\"dbtversion\",\n help=\"\"\"\n Show version information\n \"\"\",\n )\n\n p.add_argument(\n \"-r\",\n \"--record-timing-info\",\n default=None,\n type=str,\n help=\"\"\"\n When this option is passed, dbt will output low-level timing stats to\n the specified file. Example: `--record-timing-info output.profile`\n \"\"\",\n )\n\n p.add_argument(\n \"-d\",\n \"--debug\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Display debug logging during dbt execution. 
Useful for debugging and\n making bug reports.\n \"\"\",\n )\n\n p.add_argument(\n \"--log-format\",\n choices=[\"text\", \"json\", \"default\"],\n default=None,\n help=\"\"\"Specify the log format, overriding the command's default.\"\"\",\n )\n\n p.add_argument(\n \"--no-write-json\",\n action=\"store_false\",\n default=None,\n dest=\"write_json\",\n help=\"\"\"\n If set, skip writing the manifest and run_results.json files to disk\n \"\"\",\n )\n colors_flag = p.add_mutually_exclusive_group()\n colors_flag.add_argument(\n \"--use-colors\",\n action=\"store_const\",\n const=True,\n default=None,\n dest=\"use_colors\",\n help=\"\"\"\n Colorize the output DBT prints to the terminal. Output is colorized by\n default and may also be set in a profile or at the command line.\n Mutually exclusive with --no-use-colors\n \"\"\",\n )\n colors_flag.add_argument(\n \"--no-use-colors\",\n action=\"store_const\",\n const=False,\n dest=\"use_colors\",\n help=\"\"\"\n Do not colorize the output DBT prints to the terminal. Output is\n colorized by default and may also be set in a profile or at the\n command line.\n Mutually exclusive with --use-colors\n \"\"\",\n )\n\n p.add_argument(\n \"--printer-width\",\n dest=\"printer_width\",\n help=\"\"\"\n Sets the width of terminal output\n \"\"\",\n )\n\n p.add_argument(\n \"--warn-error\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n If dbt would normally warn, instead raise an exception. Examples\n include --models that selects nothing, deprecations, configurations\n with no associated models, invalid test configurations, and missing\n sources/refs in tests.\n \"\"\",\n )\n\n p.add_argument(\n \"--no-version-check\",\n dest=\"version_check\",\n action=\"store_false\",\n default=None,\n help=\"\"\"\n If set, skip ensuring dbt's version matches the one specified in\n the dbt_project.yml file ('require-dbt-version')\n \"\"\",\n )\n\n p.add_optional_argument_inverse(\n \"--partial-parse\",\n enable_help=\"\"\"\n Allow for partial parsing by looking for and writing to a pickle file\n in the target directory. This overrides the user configuration file.\n \"\"\",\n disable_help=\"\"\"\n Disallow partial parsing. This overrides the user configuration file.\n \"\"\",\n )\n\n # if set, run dbt in single-threaded mode: thread count is ignored, and\n # calls go through `map` instead of the thread pool. This is useful for\n # getting performance information about aspects of dbt that normally run in\n # a thread, as the profiler ignores child threads. Users should really\n # never use this.\n p.add_argument(\n \"--single-threaded\",\n action=\"store_true\",\n help=argparse.SUPPRESS,\n )\n\n # if set, will use the latest features from the static parser instead of\n # the stable static parser.\n p.add_argument(\n \"--use-experimental-parser\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Enables experimental parsing features.\n \"\"\",\n )\n\n # if set, will disable the use of the stable static parser and instead\n # always rely on jinja rendering.\n p.add_argument(\n \"--no-static-parser\",\n default=None,\n dest=\"static_parser\",\n action=\"store_false\",\n help=\"\"\"\n Disables the static parser.\n \"\"\",\n )\n\n p.add_argument(\n \"--profiles-dir\",\n default=None,\n dest=\"profiles_dir\",\n type=str,\n help=\"\"\"\n Which directory to look in for the profiles.yml file. 
If not set, dbt will look in the current working directory first, then HOME/.dbt/\n \"\"\",\n )\n\n p.add_argument(\n \"--no-anonymous-usage-stats\",\n action=\"store_false\",\n default=None,\n dest=\"send_anonymous_usage_stats\",\n help=\"\"\"\n Do not send anonymous usage stat to dbt Labs\n \"\"\",\n )\n\n p.add_argument(\n \"-x\",\n \"--fail-fast\",\n dest=\"fail_fast\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Stop execution upon a first failure.\n \"\"\",\n )\n\n p.add_argument(\n \"--event-buffer-size\",\n dest=\"event_buffer_size\",\n help=\"\"\"\n Sets the max number of events to buffer in EVENT_HISTORY\n \"\"\",\n )\n\n p.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Suppress all non-error logging to stdout. Does not affect\n {{ print() }} macro calls.\n \"\"\",\n )\n\n p.add_argument(\n \"--no-print\",\n action=\"store_true\",\n default=None,\n help=\"\"\"\n Suppress all {{ print() }} macro calls.\n \"\"\",\n )\n\n schema_cache_flag = p.add_mutually_exclusive_group()\n schema_cache_flag.add_argument(\n \"--cache-selected-only\",\n action=\"store_const\",\n const=True,\n default=None,\n dest=\"cache_selected_only\",\n help=\"\"\"\n Pre cache database objects relevant to selected resource only.\n \"\"\",\n )\n schema_cache_flag.add_argument(\n \"--no-cache-selected-only\",\n action=\"store_const\",\n const=False,\n dest=\"cache_selected_only\",\n help=\"\"\"\n Pre cache all database objects related to the project.\n \"\"\",\n )\n\n subs = p.add_subparsers(title=\"Available sub-commands\")\n\n base_subparser = _build_base_subparser()\n\n # make the subcommands that have their own subcommands\n docs_sub = _build_docs_subparser(subs, base_subparser)\n docs_subs = docs_sub.add_subparsers(title=\"Available sub-commands\")\n source_sub = _build_source_subparser(subs, base_subparser)\n source_subs = source_sub.add_subparsers(title=\"Available sub-commands\")\n\n _build_init_subparser(subs, base_subparser)\n _build_clean_subparser(subs, base_subparser)\n _build_debug_subparser(subs, base_subparser)\n _build_deps_subparser(subs, base_subparser)\n _build_list_subparser(subs, base_subparser)\n\n build_sub = _build_build_subparser(subs, base_subparser)\n snapshot_sub = _build_snapshot_subparser(subs, base_subparser)\n run_sub = _build_run_subparser(subs, base_subparser)\n compile_sub = _build_compile_subparser(subs, base_subparser)\n parse_sub = _build_parse_subparser(subs, base_subparser)\n generate_sub = _build_docs_generate_subparser(docs_subs, base_subparser)\n test_sub = _build_test_subparser(subs, base_subparser)\n seed_sub = _build_seed_subparser(subs, base_subparser)\n # --threads, --no-version-check\n _add_common_arguments(\n run_sub, compile_sub, generate_sub, test_sub, seed_sub, parse_sub, build_sub\n )\n # --select, --exclude\n # list_sub sets up its own arguments.\n _add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub)\n # --defer\n _add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub, compile_sub)\n # --full-refresh\n _add_table_mutability_arguments(run_sub, compile_sub, build_sub)\n\n _build_docs_serve_subparser(docs_subs, base_subparser)\n _build_source_freshness_subparser(source_subs, base_subparser)\n _build_run_operation_subparser(subs, base_subparser)\n\n if len(args) == 0:\n p.print_help()\n sys.exit(1)\n\n parsed = p.parse_args(args)\n\n # profiles_dir is set before subcommands and after, so normalize\n if hasattr(parsed, \"sub_profiles_dir\"):\n if 
parsed.sub_profiles_dir is not None:\n parsed.profiles_dir = parsed.sub_profiles_dir\n delattr(parsed, \"sub_profiles_dir\")\n if hasattr(parsed, \"profiles_dir\"):\n if parsed.profiles_dir is None:\n parsed.profiles_dir = flags.PROFILES_DIR\n else:\n parsed.profiles_dir = os.path.abspath(parsed.profiles_dir)\n # needs to be set before the other flags, because it's needed to\n # read the profile that contains them\n flags.PROFILES_DIR = parsed.profiles_dir\n\n # version_check is set before subcommands and after, so normalize\n if hasattr(parsed, \"sub_version_check\"):\n if parsed.sub_version_check is False:\n parsed.version_check = False\n delattr(parsed, \"sub_version_check\")\n\n # fail_fast is set before subcommands and after, so normalize\n if hasattr(parsed, \"sub_fail_fast\"):\n if parsed.sub_fail_fast is True:\n parsed.fail_fast = True\n delattr(parsed, \"sub_fail_fast\")\n\n if getattr(parsed, \"project_dir\", None) is not None:\n expanded_user = os.path.expanduser(parsed.project_dir)\n parsed.project_dir = os.path.abspath(expanded_user)\n\n if not hasattr(parsed, \"which\"):\n # the user did not provide a valid subcommand. trigger the help message\n # and exit with a error\n p.print_help()\n p.exit(1)\n\n return parsed\n", "path": "core/dbt/main.py" } ]
diff --git a/.changes/unreleased/Features-20221003-110705.yaml b/.changes/unreleased/Features-20221003-110705.yaml new file mode 100644 index 00000000000..f8142666c3b --- /dev/null +++ b/.changes/unreleased/Features-20221003-110705.yaml @@ -0,0 +1,7 @@ +kind: Features +body: extend -f flag shorthand for seed command +time: 2022-10-03T11:07:05.381632-05:00 +custom: + Author: dave-connors-3 + Issue: "5990" + PR: "5991" diff --git a/core/dbt/main.py b/core/dbt/main.py index ba06ec12bb1..be75760890e 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -687,6 +687,7 @@ def _build_seed_subparser(subparsers, base_subparser): ) seed_sub.add_argument( "--full-refresh", + "-f", action="store_true", help=""" Drop existing seed tables and recreate them
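For context on the dbt record above: the diff simply registers `-f` as a second option string on the existing `--full-refresh` argument of the seed subcommand. Below is a minimal, standalone argparse sketch of that same pattern; the parser name and demo prints are illustrative, not dbt's actual objects.

```python
import argparse

# Listing both option strings on one add_argument call makes "-f" an alias
# for "--full-refresh"; both spellings write to the same dest attribute.
parser = argparse.ArgumentParser(prog="seed-demo")
parser.add_argument(
    "--full-refresh",
    "-f",
    action="store_true",
    help="Drop existing seed tables and recreate them",
)

print(parser.parse_args(["-f"]).full_refresh)              # True
print(parser.parse_args(["--full-refresh"]).full_refresh)  # True
print(parser.parse_args([]).full_refresh)                  # False
```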
scikit-image__scikit-image-1820
Deprecate Python 2.6 after release of 0.12
[ { "content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilters\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as osp\nimport imp\nimport functools\nimport warnings\nimport sys\n\npkg_dir = osp.abspath(osp.dirname(__file__))\ndata_dir = osp.join(pkg_dir, 'data')\n\n__version__ = '0.12dev'\n\ntry:\n imp.find_module('nose')\nexcept ImportError:\n def _test(doctest=False, verbose=False):\n \"\"\"This would run all unit tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Unit tests not available.\")\n\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import nose\n args = ['', pkg_dir, '--exe', '--ignore-files=^_test']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--with-doctest', '--ignore-files=^\\.',\n '--ignore-files=^setup\\.py$$', '--ignore-files=test'])\n # Make sure warnings do not break the doc tests\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n success = nose.run('skimage', argv=args)\n else:\n success = nose.run('skimage', argv=args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\n# Logic for checking for improper install and importing while in the source\n# tree when package has not been installed inplace.\n# Code adapted from scikit-learn's __check_build module.\n_INPLACE_MSG = \"\"\"\nIt appears that you are importing a local scikit-image source tree. For\nthis, you need to have an inplace install. Maybe you are in the source\ndirectory and you need to try from another location.\"\"\"\n\n_STANDARD_MSG = \"\"\"\nYour install of scikit-image appears to be broken.\nTry re-installing the package following the instructions at:\nhttp://scikit-image.org/docs/stable/install.html \"\"\"\n\n\ndef _raise_build_error(e):\n # Raise a comprehensible error\n local_dir = osp.split(__file__)[0]\n msg = _STANDARD_MSG\n if local_dir == \"skimage\":\n # Picking up the local install: this will work only if the\n # install is an 'inplace build'\n msg = _INPLACE_MSG\n raise ImportError(\"\"\"%s\nIt seems that scikit-image has not been built correctly.\n%s\"\"\" % (e, msg))\n\ntry:\n # This variable is injected in the __builtins__ by the build\n # process. It used to enable importing subpackages of skimage when\n # the binaries are not built\n __SKIMAGE_SETUP__\nexcept NameError:\n __SKIMAGE_SETUP__ = False\n\nif __SKIMAGE_SETUP__:\n sys.stderr.write('Partial import of skimage during the build process.\\n')\n # We are not importing the rest of the scikit during the build\n # process, as it may not be compiled yet\nelse:\n try:\n from ._shared import geometry\n del geometry\n except ImportError as e:\n _raise_build_error(e)\n from .util.dtype import *\n\ndel warnings, functools, osp, imp, sys\n", "path": "skimage/__init__.py" } ]
[ { "content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilters\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as osp\nimport imp\nimport functools\nimport warnings\nimport sys\n\npkg_dir = osp.abspath(osp.dirname(__file__))\ndata_dir = osp.join(pkg_dir, 'data')\n\n__version__ = '0.12dev'\n\ntry:\n imp.find_module('nose')\nexcept ImportError:\n def _test(doctest=False, verbose=False):\n \"\"\"This would run all unit tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Unit tests not available.\")\n\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import nose\n args = ['', pkg_dir, '--exe', '--ignore-files=^_test']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--with-doctest', '--ignore-files=^\\.',\n '--ignore-files=^setup\\.py$$', '--ignore-files=test'])\n # Make sure warnings do not break the doc tests\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n success = nose.run('skimage', argv=args)\n else:\n success = nose.run('skimage', argv=args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\n# Logic for checking for improper install and importing while in the source\n# tree when package has not been installed inplace.\n# Code adapted from scikit-learn's __check_build module.\n_INPLACE_MSG = \"\"\"\nIt appears that you are importing a local scikit-image source tree. For\nthis, you need to have an inplace install. Maybe you are in the source\ndirectory and you need to try from another location.\"\"\"\n\n_STANDARD_MSG = \"\"\"\nYour install of scikit-image appears to be broken.\nTry re-installing the package following the instructions at:\nhttp://scikit-image.org/docs/stable/install.html \"\"\"\n\n\ndef _raise_build_error(e):\n # Raise a comprehensible error\n local_dir = osp.split(__file__)[0]\n msg = _STANDARD_MSG\n if local_dir == \"skimage\":\n # Picking up the local install: this will work only if the\n # install is an 'inplace build'\n msg = _INPLACE_MSG\n raise ImportError(\"\"\"%s\nIt seems that scikit-image has not been built correctly.\n%s\"\"\" % (e, msg))\n\ntry:\n # This variable is injected in the __builtins__ by the build\n # process. It used to enable importing subpackages of skimage when\n # the binaries are not built\n __SKIMAGE_SETUP__\nexcept NameError:\n __SKIMAGE_SETUP__ = False\n\nif __SKIMAGE_SETUP__:\n sys.stderr.write('Partial import of skimage during the build process.\\n')\n # We are not importing the rest of the scikit during the build\n # process, as it may not be compiled yet\nelse:\n try:\n from ._shared import geometry\n del geometry\n except ImportError as e:\n _raise_build_error(e)\n from .util.dtype import *\n\n\nif sys.version.startswith('2.6'):\n warnings.warn(\"Python 2.6 is deprecated and will not be supported in scikit-image 0.13+\")\n\n\ndel warnings, functools, osp, imp, sys\n", "path": "skimage/__init__.py" } ]
diff --git a/TODO.txt b/TODO.txt index 02bd9ead1d0..3f73ebdbd7d 100644 --- a/TODO.txt +++ b/TODO.txt @@ -16,6 +16,7 @@ Version 0.14 Version 0.13 ------------ +* Require Python 2.7+, remove warning in `__init__.py`. * Remove deprecated `None` defaults for `skimage.exposure.rescale_intensity` * Remove deprecated `skimage.filters.canny` import in `filters/__init__.py` file (canny is now in `skimage.feature.canny`). diff --git a/skimage/__init__.py b/skimage/__init__.py index 2b1521563fe..673562a7c76 100644 --- a/skimage/__init__.py +++ b/skimage/__init__.py @@ -156,4 +156,9 @@ def _raise_build_error(e): _raise_build_error(e) from .util.dtype import * + +if sys.version.startswith('2.6'): + warnings.warn("Python 2.6 is deprecated and will not be supported in scikit-image 0.13+") + + del warnings, functools, osp, imp, sys
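For the scikit-image record above, the entire fix is a version sniff plus a warning at import time. The following is a standalone sketch of that idea, assuming nothing beyond the standard library; comparing `sys.version_info` is my substitution for the patch's `sys.version.startswith('2.6')` string test, and the warning text is paraphrased.

```python
import sys
import warnings

# Warn once at import time when the interpreter is Python 2.6, mirroring the
# scikit-image patch; version_info gives a structured (major, minor) pair.
if sys.version_info[:2] == (2, 6):
    warnings.warn(
        "Python 2.6 is deprecated and will not be supported in future releases",
        DeprecationWarning,
    )
```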
zestedesavoir__zds-site-3857
[beta][v20] Signing up/logging in/searching with an emoji causes a 500
Server: Beta
Version: v20-RC3/d3fd8af
System: Mac OS X
Browser: 52.0.2743.116 (64-bit)

---

1. Go to the registration page and enter a username such as: 👚 test
2. Fill in the other fields.
3. Submit the form.
4. Observe a 500 error.

Note: You can reproduce the same error by trying to log in with the same username or by searching for a member's username.
[ { "content": "# coding: utf-8\nimport hashlib\nimport re\n\nTHUMB_MAX_WIDTH = 80\nTHUMB_MAX_HEIGHT = 80\n\nMEDIUM_MAX_WIDTH = 200\nMEDIUM_MAX_HEIGHT = 200\n\n\ndef compute_hash(filenames):\n \"\"\"returns a md5 hexdigest of group of files to check if they have change\"\"\"\n md5_hash = hashlib.md5()\n for filename in filenames:\n if filename:\n file_handle = open(filename, 'rb')\n must_continue = True\n while must_continue:\n read_bytes = file_handle.read(8096)\n if not read_bytes:\n must_continue = False\n else:\n md5_hash.update(read_bytes)\n return md5_hash.hexdigest()\n\n\ndef content_has_changed(filenames, md5):\n return md5 != compute_hash(filenames)\n\n\ndef has_changed(instance, field, manager='objects'):\n \"\"\"Returns true if a field has changed in a model May be used in a\n model.save() method.\"\"\"\n if not instance.pk:\n return True\n manager = getattr(instance.__class__, manager)\n old = getattr(manager.get(pk=instance.pk), field)\n return not getattr(instance, field) == old\n\n\ndef convert_camel_to_underscore(camel_case):\n \"\"\"\n Converts a name in camel case to underscore.\n \"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef contains_utf8mb4(s):\n \"\"\"\n This string contains at least one character of more than 3 bytes\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n return not all(len(c.encode('utf-8')) <= 3 for c in s)\n", "path": "zds/utils/misc.py" } ]
[ { "content": "# coding: utf-8\nimport hashlib\nimport re\n\nTHUMB_MAX_WIDTH = 80\nTHUMB_MAX_HEIGHT = 80\n\nMEDIUM_MAX_WIDTH = 200\nMEDIUM_MAX_HEIGHT = 200\n\n\ndef compute_hash(filenames):\n \"\"\"returns a md5 hexdigest of group of files to check if they have change\"\"\"\n md5_hash = hashlib.md5()\n for filename in filenames:\n if filename:\n file_handle = open(filename, 'rb')\n must_continue = True\n while must_continue:\n read_bytes = file_handle.read(8096)\n if not read_bytes:\n must_continue = False\n else:\n md5_hash.update(read_bytes)\n return md5_hash.hexdigest()\n\n\ndef content_has_changed(filenames, md5):\n return md5 != compute_hash(filenames)\n\n\ndef has_changed(instance, field, manager='objects'):\n \"\"\"Returns true if a field has changed in a model May be used in a\n model.save() method.\"\"\"\n if not instance.pk:\n return True\n manager = getattr(instance.__class__, manager)\n old = getattr(manager.get(pk=instance.pk), field)\n return not getattr(instance, field) == old\n\n\ndef convert_camel_to_underscore(camel_case):\n \"\"\"\n Converts a name in camel case to underscore.\n \"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef contains_utf8mb4(s):\n \"\"\"\n This string contains at least one character of more than 3 bytes\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n re_pattern = re.compile(u'[^\\u0000-\\uD7FF\\uE000-\\uFFFF]', re.UNICODE)\n return s != re_pattern.sub(u'\\uFFFD', s)\n", "path": "zds/utils/misc.py" } ]
diff --git a/zds/utils/misc.py b/zds/utils/misc.py index 33cc259cc0..20c63afbaf 100644 --- a/zds/utils/misc.py +++ b/zds/utils/misc.py @@ -53,4 +53,5 @@ def contains_utf8mb4(s): """ if not isinstance(s, unicode): s = unicode(s, 'utf-8') - return not all(len(c.encode('utf-8')) <= 3 for c in s) + re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE) + return s != re_pattern.sub(u'\uFFFD', s) diff --git a/zds/utils/tests/misc.py b/zds/utils/tests/test_misc.py similarity index 100% rename from zds/utils/tests/misc.py rename to zds/utils/tests/test_misc.py
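The zds fix above swaps the per-character UTF-8 byte-length test for a regex that flags any code point outside U+0000–U+D7FF / U+E000–U+FFFF, i.e. anything that needs four bytes in UTF-8 and therefore breaks MySQL's three-byte `utf8` charset. Here is a Python 3 sketch of the same check; it uses `search` instead of the patch's substitute-and-compare trick, which yields the same boolean result, and the helper name is only for illustration.

```python
import re

# Anything outside the Basic Multilingual Plane (emoji, for instance) falls
# outside these two ranges and needs utf8mb4 storage in MySQL.
_NON_BMP = re.compile('[^\u0000-\uD7FF\uE000-\uFFFF]')

def contains_utf8mb4(text: str) -> bool:
    """Return True if the string holds at least one 4-byte UTF-8 character."""
    return _NON_BMP.search(text) is not None

print(contains_utf8mb4("plain ascii"))  # False
print(contains_utf8mb4("👚 test"))      # True
```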
angr__angr-2256
The version requirement CFFI>=1.7.0 may not be correct
<!-- *Disclaimer: The angr suite is maintained by a small team of volunteers. While we cannot guarantee any timeliness for fixes and enhancements, we will do our best. For more real-time help with angr, from us and the community, join our [Slack.](http://angr.io/invite/)* -->

---

**Describe the bug.**
<!-- Please include a clear and concise description of what the bug is. -->
I encounter an error like this:
```
  File "<stdin>", line 1, in <module>
  File "/usr/local/lib/python3.6/dist-packages/angr/project.py", line 131, in __init__
    self.loader = cle.Loader(self.filename, concrete_target=concrete_target, **load_options)
  File "/usr/local/lib/python3.6/dist-packages/cle/loader.py", line 133, in __init__
    self.initial_load_objects = self._internal_load(main_binary, *preload_libs, *force_load_libs, preloading=(main_binary, *preload_libs))
  File "/usr/local/lib/python3.6/dist-packages/cle/loader.py", line 652, in _internal_load
    obj = self._load_object_isolated(main_spec)
  File "/usr/local/lib/python3.6/dist-packages/cle/loader.py", line 832, in _load_object_isolated
    result = backend_cls(binary, binary_stream, is_main_bin=self.main_object is None, loader=self, **options)
  File "/usr/local/lib/python3.6/dist-packages/cle/backends/elf/elf.py", line 152, in __init__
    self._load_plt()
  File "/usr/local/lib/python3.6/dist-packages/cle/backends/elf/metaelf.py", line 90, in _load_plt
    sanity_check=not self.pic)
  File "/usr/local/lib/python3.6/dist-packages/cle/backends/elf/metaelf.py", line 49, in _add_plt_stub
    if sanity_check and target_addr not in [c.value for c in self._block(addr, skip_stmts=False).all_constants]:
  File "/usr/local/lib/python3.6/dist-packages/cle/backends/elf/metaelf.py", line 42, in _block
    return pyvex.IRSB(dat, addr, self.arch, bytes_offset=1 if thumb else 0, opt_level=1, skip_stmts=skip_stmts)
  File "/usr/local/lib/python3.6/dist-packages/pyvex/block.py", line 115, in __init__
    cross_insn_opt=cross_insn_opt,
  File "/usr/local/lib/python3.6/dist-packages/pyvex/lifting/__init__.py", line 83, in lift
    u_data = ffi.from_buffer(ffi.BVoidP, py_data + b'\0' * 8 if type(py_data) is bytes else py_data)
  File "/home/ling/.local/lib/python3.6/site-packages/cffi/api.py", line 362, in from_buffer
    require_writable)
TypeError: expected an array ctype, got 'void *'
```
I solved it by upgrading cffi from cffi-1.12.2 to cffi-1.14.0. In angr's setup.py, cffi is only required to be >=1.7.0.

**Environment Information.**
<!-- Many common issues are caused by problems with the local Python environment. Before submitting, double-check that your versions of all modules in the angr suite (angr, cle, pyvex, ...) are up to date. Please include the output of `python -m angr.misc.bug_report` here. -->

**To Reproduce.**
<!-- Please include *both a script to reproduce the crash, and attach the binary used, if possible* -->
angr is v8.20.7.6

**Additional context.**
<!-- Add any other context about the problem here. -->
[ { "content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\nimport glob\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr has transitioned to python 3. Due to the small size of the team behind it,\nwe can't reasonably maintain compatibility between both python 2 and python 3.\nIf you want to continue using the most recent version of angr (you definitely\nwant that, trust us) you should upgrade to python 3. It's like getting your\nvaccinations. It hurts a little bit initially but in the end it's worth it.\n\nIf you are staying on python 2 and would like to make sure you don't get\nincompatible versions, make sure your pip is at least version 9.0, and it will\nuse our metadata to implicitly avoid them.\n\nFor more information, see here: https://docs.angr.io/appendix/migration\n\nGood luck!\n\"\"\")\n\ntry:\n from setuptools import setup\n from setuptools import find_packages\n packages = find_packages()\nexcept ImportError:\n from distutils.core import setup\n packages = [x.strip('./').replace('/','.') for x in os.popen('find -name \"__init__.py\" | xargs -n1 dirname').read().strip().split('\\n')]\n\nfrom distutils.util import get_platform\nfrom distutils.errors import LibError\nfrom distutils.command.build import build as _build\nfrom distutils.command.clean import clean as _clean\n\nif sys.platform == 'darwin':\n library_file = \"angr_native.dylib\"\nelif sys.platform in ('win32', 'cygwin'):\n library_file = \"angr_native.dll\"\nelse:\n library_file = \"angr_native.so\"\n\ndef _build_native():\n try:\n import unicorn\n import pyvex\n except ImportError:\n raise LibError(\"You must install unicorn and pyvex before building angr\")\n\n env = os.environ.copy()\n env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),\n ('UNICORN_LIB_PATH', 'unicorn', 'lib'),\n ('UNICORN_LIB_FILE', 'unicorn', 'lib\\\\unicorn.lib'),\n ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),\n ('PYVEX_LIB_PATH', 'pyvex', 'lib'),\n ('PYVEX_LIB_FILE', 'pyvex', 'lib\\\\pyvex.lib'))\n for var, pkg, fnm in env_data:\n try:\n env[var] = pkg_resources.resource_filename(pkg, fnm)\n except KeyError:\n pass\n\n cmd1 = ['nmake', '/f', 'Makefile-win']\n cmd2 = ['make']\n for cmd in (cmd1, cmd2):\n try:\n if subprocess.call(cmd, cwd='native', env=env) != 0:\n raise LibError('Unable to build angr_native')\n break\n except OSError:\n continue\n else:\n raise LibError('Unable to build angr_native')\n\n shutil.rmtree('angr/lib', ignore_errors=True)\n os.mkdir('angr/lib')\n shutil.copy(os.path.join('native', library_file), 'angr/lib')\n\ndef _clean_native():\n oglob = glob.glob('native/*.o')\n oglob += glob.glob('native/*.obj')\n oglob += glob.glob('native/*.so')\n oglob += glob.glob('native/*.dll')\n oglob += glob.glob('native/*.dylib')\n for fname in oglob:\n os.unlink(fname)\n\nclass build(_build):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _build.run(self, *args)\n\nclass clean(_clean):\n def run(self, *args):\n self.execute(_clean_native, (), msg='Cleaning angr_native')\n _clean.run(self, *args)\n\ncmdclass = {\n 'build': build,\n 'clean': clean,\n}\n\ntry:\n from setuptools.command.develop import develop as _develop\n class develop(_develop):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _develop.run(self, *args)\n\n cmdclass['develop'] = 
develop\nexcept ImportError:\n pass\n\nif 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:\n sys.argv.append('--plat-name')\n name = get_platform()\n if 'linux' in name:\n # linux_* platform tags are disallowed because the python ecosystem is fubar\n # linux builds should be built in the centos 5 vm for maximum compatibility\n sys.argv.append('manylinux1_' + platform.machine())\n else:\n # https://www.python.org/dev/peps/pep-0425/\n sys.argv.append(name.replace('.', '_').replace('-', '_'))\n\n_UNICORN = \"unicorn>=1.0.2rc2\"\n\nsetup(\n name='angr',\n version='8.20.7.6',\n python_requires='>=3.6',\n description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',\n url='https://github.com/angr/angr',\n packages=packages,\n install_requires=[\n 'sortedcontainers',\n 'cachetools',\n 'capstone>=3.0.5rc2',\n 'dpkt',\n 'mulpyplexer',\n 'networkx>=2.0',\n 'progressbar2',\n 'rpyc',\n 'cffi>=1.7.0',\n _UNICORN,\n 'archinfo==8.20.7.6',\n 'claripy==8.20.7.6',\n 'cle==8.20.7.6',\n 'pyvex==8.20.7.6',\n 'ailment==8.20.7.6',\n 'GitPython',\n 'psutil',\n 'pycparser>=2.18',\n 'itanium_demangler',\n 'CppHeaderParser',\n 'protobuf',\n ],\n setup_requires=[_UNICORN, 'pyvex'],\n extras_require={\n 'AngrDB': ['sqlalchemy'],\n },\n cmdclass=cmdclass,\n include_package_data=True,\n package_data={\n 'angr': ['lib/*', \"py.typed\"]\n }\n)\n", "path": "setup.py" } ]
[ { "content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\nimport glob\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr has transitioned to python 3. Due to the small size of the team behind it,\nwe can't reasonably maintain compatibility between both python 2 and python 3.\nIf you want to continue using the most recent version of angr (you definitely\nwant that, trust us) you should upgrade to python 3. It's like getting your\nvaccinations. It hurts a little bit initially but in the end it's worth it.\n\nIf you are staying on python 2 and would like to make sure you don't get\nincompatible versions, make sure your pip is at least version 9.0, and it will\nuse our metadata to implicitly avoid them.\n\nFor more information, see here: https://docs.angr.io/appendix/migration\n\nGood luck!\n\"\"\")\n\ntry:\n from setuptools import setup\n from setuptools import find_packages\n packages = find_packages()\nexcept ImportError:\n from distutils.core import setup\n packages = [x.strip('./').replace('/','.') for x in os.popen('find -name \"__init__.py\" | xargs -n1 dirname').read().strip().split('\\n')]\n\nfrom distutils.util import get_platform\nfrom distutils.errors import LibError\nfrom distutils.command.build import build as _build\nfrom distutils.command.clean import clean as _clean\n\nif sys.platform == 'darwin':\n library_file = \"angr_native.dylib\"\nelif sys.platform in ('win32', 'cygwin'):\n library_file = \"angr_native.dll\"\nelse:\n library_file = \"angr_native.so\"\n\ndef _build_native():\n try:\n import unicorn\n import pyvex\n except ImportError:\n raise LibError(\"You must install unicorn and pyvex before building angr\")\n\n env = os.environ.copy()\n env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),\n ('UNICORN_LIB_PATH', 'unicorn', 'lib'),\n ('UNICORN_LIB_FILE', 'unicorn', 'lib\\\\unicorn.lib'),\n ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),\n ('PYVEX_LIB_PATH', 'pyvex', 'lib'),\n ('PYVEX_LIB_FILE', 'pyvex', 'lib\\\\pyvex.lib'))\n for var, pkg, fnm in env_data:\n try:\n env[var] = pkg_resources.resource_filename(pkg, fnm)\n except KeyError:\n pass\n\n cmd1 = ['nmake', '/f', 'Makefile-win']\n cmd2 = ['make']\n for cmd in (cmd1, cmd2):\n try:\n if subprocess.call(cmd, cwd='native', env=env) != 0:\n raise LibError('Unable to build angr_native')\n break\n except OSError:\n continue\n else:\n raise LibError('Unable to build angr_native')\n\n shutil.rmtree('angr/lib', ignore_errors=True)\n os.mkdir('angr/lib')\n shutil.copy(os.path.join('native', library_file), 'angr/lib')\n\ndef _clean_native():\n oglob = glob.glob('native/*.o')\n oglob += glob.glob('native/*.obj')\n oglob += glob.glob('native/*.so')\n oglob += glob.glob('native/*.dll')\n oglob += glob.glob('native/*.dylib')\n for fname in oglob:\n os.unlink(fname)\n\nclass build(_build):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _build.run(self, *args)\n\nclass clean(_clean):\n def run(self, *args):\n self.execute(_clean_native, (), msg='Cleaning angr_native')\n _clean.run(self, *args)\n\ncmdclass = {\n 'build': build,\n 'clean': clean,\n}\n\ntry:\n from setuptools.command.develop import develop as _develop\n class develop(_develop):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _develop.run(self, *args)\n\n cmdclass['develop'] = 
develop\nexcept ImportError:\n pass\n\nif 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:\n sys.argv.append('--plat-name')\n name = get_platform()\n if 'linux' in name:\n # linux_* platform tags are disallowed because the python ecosystem is fubar\n # linux builds should be built in the centos 5 vm for maximum compatibility\n sys.argv.append('manylinux1_' + platform.machine())\n else:\n # https://www.python.org/dev/peps/pep-0425/\n sys.argv.append(name.replace('.', '_').replace('-', '_'))\n\n_UNICORN = \"unicorn>=1.0.2rc2\"\n\nsetup(\n name='angr',\n version='8.20.7.6',\n python_requires='>=3.6',\n description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',\n url='https://github.com/angr/angr',\n packages=packages,\n install_requires=[\n 'sortedcontainers',\n 'cachetools',\n 'capstone>=3.0.5rc2',\n 'dpkt',\n 'mulpyplexer',\n 'networkx>=2.0',\n 'progressbar2',\n 'rpyc',\n 'cffi>=1.14.0',\n _UNICORN,\n 'archinfo==8.20.7.6',\n 'claripy==8.20.7.6',\n 'cle==8.20.7.6',\n 'pyvex==8.20.7.6',\n 'ailment==8.20.7.6',\n 'GitPython',\n 'psutil',\n 'pycparser>=2.18',\n 'itanium_demangler',\n 'CppHeaderParser',\n 'protobuf',\n ],\n setup_requires=[_UNICORN, 'pyvex'],\n extras_require={\n 'AngrDB': ['sqlalchemy'],\n },\n cmdclass=cmdclass,\n include_package_data=True,\n package_data={\n 'angr': ['lib/*', \"py.typed\"]\n }\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index e5a96bcfb79..1928bbd670e 100644 --- a/setup.py +++ b/setup.py @@ -147,7 +147,7 @@ def run(self, *args): 'networkx>=2.0', 'progressbar2', 'rpyc', - 'cffi>=1.7.0', + 'cffi>=1.14.0', _UNICORN, 'archinfo==8.20.7.6', 'claripy==8.20.7.6',
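The angr record above only raises the floor that pip sees; an already-installed, too-old cffi would still import silently. Below is a small sketch of a runtime guard one could add on top of that, assuming nothing beyond setuptools' `pkg_resources`; this is not something the angr patch itself does.

```python
import pkg_resources

# Raises pkg_resources.VersionConflict at import time if the installed cffi
# is older than the 1.14.0 minimum the patch settles on; the reporter's
# cffi 1.12.2 rejected ffi.from_buffer(ffi.BVoidP, ...) with
# "TypeError: expected an array ctype, got 'void *'".
pkg_resources.require("cffi>=1.14.0")
```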